Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
240 files changed, 54921 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore new file mode 100644 index 000000000..89c4a3d37 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +tests.h diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c new file mode 100644 index 000000000..8baebb415 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -0,0 +1,696 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define MAX_INSNS 512 +#define MAX_MATCHES 24 + +struct bpf_reg_match { + unsigned int line; + const char *match; +}; + +struct bpf_align_test { + const char *descr; + struct bpf_insn insns[MAX_INSNS]; + enum { + UNDEF, + ACCEPT, + REJECT + } result; + enum bpf_prog_type prog_type; + /* Matches must be in order of increasing line */ + struct bpf_reg_match matches[MAX_MATCHES]; +}; + +static struct bpf_align_test tests[] = { + /* Four tests of known constants. These aren't staggeringly + * interesting since we track exact values now. + */ + { + .descr = "mov", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 16), + BPF_MOV64_IMM(BPF_REG_3, 32), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {0, "R1=ctx(off=0,imm=0)"}, + {0, "R10=fp0"}, + {0, "R3_w=2"}, + {1, "R3_w=4"}, + {2, "R3_w=8"}, + {3, "R3_w=16"}, + {4, "R3_w=32"}, + }, + }, + { + .descr = "shift", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 32), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {0, "R1=ctx(off=0,imm=0)"}, + {0, "R10=fp0"}, + {0, "R3_w=1"}, + {1, "R3_w=2"}, + {2, "R3_w=4"}, + {3, "R3_w=8"}, + {4, "R3_w=16"}, + {5, "R3_w=1"}, + {6, "R4_w=32"}, + {7, "R4_w=16"}, + {8, "R4_w=8"}, + {9, "R4_w=4"}, + {10, "R4_w=2"}, + }, + }, + { + .descr = "addsub", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {0, "R1=ctx(off=0,imm=0)"}, + {0, "R10=fp0"}, + {0, "R3_w=4"}, + {1, "R3_w=8"}, + {2, "R3_w=10"}, + {3, "R4_w=8"}, + {4, "R4_w=12"}, + {5, "R4_w=14"}, + }, + }, + { + .descr = "mul", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 7), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {0, "R1=ctx(off=0,imm=0)"}, + {0, "R10=fp0"}, + {0, "R3_w=7"}, + {1, "R3_w=7"}, + {2, "R3_w=14"}, + {3, "R3_w=56"}, + }, + }, + + /* Tests using unknown values */ +#define PREP_PKT_POINTERS \ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ + offsetof(struct __sk_buff, data)), \ + BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, \ + offsetof(struct __sk_buff, data_end)) + +#define LOAD_UNKNOWN(DST_REG) \ + PREP_PKT_POINTERS, \ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \ + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \ + BPF_EXIT_INSN(), \ + BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0) + + { + .descr = "unknown shift", + .insns = { + LOAD_UNKNOWN(BPF_REG_3), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + LOAD_UNKNOWN(BPF_REG_4), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {6, "R0_w=pkt(off=8,r=8,imm=0)"}, + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {12, "R3_w=pkt_end(off=0,imm=0)"}, + {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"}, + {19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + }, + }, + { + .descr = "unknown mul", + .insns = { + LOAD_UNKNOWN(BPF_REG_3), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + }, + }, + { + .descr = "packet const offset", + .insns = { + PREP_PKT_POINTERS, + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + + /* Skip over ethernet header. 
*/ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3), + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + {2, "R5_w=pkt(off=0,r=0,imm=0)"}, + {4, "R5_w=pkt(off=14,r=0,imm=0)"}, + {5, "R4_w=pkt(off=14,r=0,imm=0)"}, + {9, "R2=pkt(off=0,r=18,imm=0)"}, + {10, "R5=pkt(off=14,r=18,imm=0)"}, + {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, + {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, + }, + }, + { + .descr = "packet variable offset", + .insns = { + LOAD_UNKNOWN(BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + + /* First, add a constant to the R5 packet pointer, + * then a variable with a known alignment. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + /* Now, test in the other direction. Adding first + * the variable offset to R5, then the constant. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + /* Test multiple accumulations of unknown values + * into a packet pointer. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + /* Offset is added to packet pointer R5, resulting in + * known fixed offset, and variable offset from R6. + */ + {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + /* At the time the word size load is performed from R5, + * it's total offset is NET_IP_ALIGN + reg->off (0) + + * reg->aux_off (14) which is 16. Then the variable + * offset is considered using reg->aux_off_align which + * is 4 and meets the load's requirements. + */ + {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + /* Variable offset is added to R5 packet pointer, + * resulting in auxiliary alignment of 4. 
To avoid BPF + * verifier's precision backtracking logging + * interfering we also have a no-op R4 = R5 + * instruction to validate R5 state. We also check + * that R4 is what it should be in such case. + */ + {18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + /* Constant offset is added to R5, resulting in + * reg->off of 14. + */ + {19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off + * (14) which is 16. Then the variable offset is 4-byte + * aligned, so the total offset is 4-byte aligned and + * meets the load's requirements. + */ + {24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + /* Constant offset is added to R5 packet pointer, + * resulting in reg->off value of 14. + */ + {26, "R5_w=pkt(off=14,r=8"}, + /* Variable offset is added to R5, resulting in a + * variable offset of (4n). See comment for insn #18 + * for R4 = R5 trick. + */ + {28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + {28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + /* Constant is added to R5 again, setting reg->off to 18. */ + {29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, + /* And once more we add a variable; resulting var_off + * is still (4n), fixed offset is not changed. + * Also, we create a new reg->id. + */ + {31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, + {31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (18) + * which is 20. Then the variable offset is (4n), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + {35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + }, + }, + { + .descr = "packet variable offset 2", + .insns = { + /* Create an unknown offset, (4n+2)-aligned */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Add it to the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + /* Make a (4n) offset from the value we just read */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + /* Add it to the packet pointer */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. 
+ */ + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + /* Adding 14 makes R6 be (4n+2) */ + {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, + /* Packet pointer has (4n+2) offset */ + {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, + {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, + /* Newly read value in R6 was shifted left by 2, so has + * known alignment of 4. + */ + {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + /* Added (4n) to packet pointer's (4n+2) var_off, giving + * another (4n+2). + */ + {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, + {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, + }, + }, + { + .descr = "dubious pointer arithmetic", + .insns = { + PREP_PKT_POINTERS, + BPF_MOV64_IMM(BPF_REG_0, 0), + /* (ptr - ptr) << 2 */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2), + /* We have a (4n) value. Let's make a packet offset + * out of it. First add 14, to make it a (4n+2) + */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + /* Then make sure it's nonnegative */ + BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1), + BPF_EXIT_INSN(), + /* Add it to packet pointer */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .matches = { + {3, "R5_w=pkt_end(off=0,imm=0)"}, + /* (ptr - ptr) << 2 == unknown, (4n) */ + {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, + /* (4n) + 14 == (4n+2). We blow our bounds, because + * the add could overflow. + */ + {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + /* Checked s>=0 */ + {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + /* packet pointer + nonnegative (4n+2) */ + {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. + * We checked the bounds, but it might have been able + * to overflow if the packet pointer started in the + * upper half of the address space. + * So we did not get a 'range' on R6, and the access + * attempt will fail. 
+ */ + {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + } + }, + { + .descr = "variable subtraction", + .insns = { + /* Create an unknown offset, (4n+2)-aligned */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Create another unknown, (4n)-aligned, and subtract + * it from the first one + */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7), + /* Bounds-check the result */ + BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1), + BPF_EXIT_INSN(), + /* Add it to the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + /* Adding 14 makes R6 be (4n+2) */ + {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, + /* New unknown value in R7 is (4n) */ + {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + /* Subtracting it from R6 blows our unsigned bounds */ + {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + /* Checked s>= 0 */ + {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"}, + + }, + }, + { + .descr = "pointer variable subtraction", + .insns = { + /* Create an unknown offset, (4n+2)-aligned and bounded + * to [14,74] + */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Subtract it from the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6), + /* Create another unknown, (4n)-aligned and >= 74. + * That in fact means >= 76, since 74 % 4 == 2 + */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76), + /* Add it to the packet pointer */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. 
+ */ + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"}, + /* Adding 14 makes R6 be (4n+2) */ + {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"}, + /* Subtracting from packet pointer overflows ubounds */ + {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, + /* New unknown value in R7 is (4n), >= 76 */ + {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"}, + /* Adding it to packet pointer gives nice bounds again */ + {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"}, + }, + }, +}; + +static int probe_filter_length(const struct bpf_insn *fp) +{ + int len; + + for (len = MAX_INSNS - 1; len > 0; --len) + if (fp[len].code != 0 || fp[len].imm != 0) + break; + return len + 1; +} + +static char bpf_vlog[32768]; + +static int do_test_single(struct bpf_align_test *test) +{ + struct bpf_insn *prog = test->insns; + int prog_type = test->prog_type; + char bpf_vlog_copy[32768]; + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .prog_flags = BPF_F_STRICT_ALIGNMENT, + .log_buf = bpf_vlog, + .log_size = sizeof(bpf_vlog), + .log_level = 2, + ); + const char *line_ptr; + int cur_line = -1; + int prog_len, i; + int fd_prog; + int ret; + + prog_len = probe_filter_length(prog); + fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", + prog, prog_len, &opts); + if (fd_prog < 0 && test->result != REJECT) { + printf("Failed to load program.\n"); + printf("%s", bpf_vlog); + ret = 1; + } else if (fd_prog >= 0 && test->result == REJECT) { + printf("Unexpected success to load!\n"); + printf("%s", bpf_vlog); + ret = 1; + close(fd_prog); + } else { + ret = 0; + /* We make a local copy so that we can strtok() it */ + strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy)); + line_ptr = strtok(bpf_vlog_copy, "\n"); + for (i = 0; i < MAX_MATCHES; i++) { + struct bpf_reg_match m = test->matches[i]; + int tmp; + + if (!m.match) + break; + while (line_ptr) { + cur_line = -1; + sscanf(line_ptr, "%u: ", &cur_line); + if (cur_line == -1) + sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line); + if (cur_line == m.line) + break; + line_ptr = strtok(NULL, "\n"); + } + if (!line_ptr) { + printf("Failed to find line %u for match: %s\n", + m.line, m.match); + ret = 1; + printf("%s", bpf_vlog); + break; + } + /* Check the next line as well in case the previous line + * did not have a corresponding bpf insn. 
Example: + * func#0 @0 + * 0: R1=ctx(off=0,imm=0) R10=fp0 + * 0: (b7) r3 = 2 ; R3_w=2 + */ + if (!strstr(line_ptr, m.match)) { + cur_line = -1; + line_ptr = strtok(NULL, "\n"); + sscanf(line_ptr, "%u: ", &cur_line); + } + if (cur_line != m.line || !line_ptr || + !strstr(line_ptr, m.match)) { + printf("Failed to find match %u: %s\n", + m.line, m.match); + ret = 1; + printf("%s", bpf_vlog); + break; + } + } + if (fd_prog >= 0) + close(fd_prog); + } + return ret; +} + +void test_align(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct bpf_align_test *test = &tests[i]; + + if (!test__start_subtest(test->descr)) + continue; + + ASSERT_OK(do_test_single(test), test->descr); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c new file mode 100644 index 000000000..b17bfa0e0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void init_test_filter_set(struct test_filter_set *set) +{ + set->cnt = 0; + set->tests = NULL; +} + +static void free_test_filter_set(struct test_filter_set *set) +{ + int i, j; + + for (i = 0; i < set->cnt; i++) { + for (j = 0; j < set->tests[i].subtest_cnt; j++) + free((void *)set->tests[i].subtests[j]); + free(set->tests[i].subtests); + free(set->tests[i].name); + } + + free(set->tests); + init_test_filter_set(set); +} + +static void test_parse_test_list(void) +{ + struct test_filter_set set; + + init_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "test filters count")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie", + &set, + true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true), + "parsing"); + ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing"); + ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 3, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if 
(!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name"); +error: + free_test_filter_set(&set); +} + +void test_arg_parsing(void) +{ + if (test__start_subtest("test_parse_test_list")) + test_parse_test_list(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c b/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c new file mode 100644 index 000000000..69bd7853e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/atomic_bounds.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "atomic_bounds.skel.h" + +void test_atomic_bounds(void) +{ + struct atomic_bounds *skel; + __u32 duration = 0; + + skel = atomic_bounds__open_and_load(); + if (CHECK(!skel, "skel_load", "couldn't load program\n")) + return; + + atomic_bounds__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c new file mode 100644 index 000000000..13e101f37 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "atomics.lskel.h" + +static void test_add(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.add.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); + ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); + + ASSERT_EQ(skel->data->add32_value, 3, "add32_value"); + ASSERT_EQ(skel->bss->add32_result, 1, "add32_result"); + + ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value"); + ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); + + ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); +} + +static void test_sub(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.sub.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value"); + ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result"); + + ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value"); + ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result"); + + ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, 
"sub_stack_value"); + ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result"); + + ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value"); +} + +static void test_and(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.and.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value"); + ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result"); + + ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value"); + ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result"); + + ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value"); +} + +static void test_or(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.or.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value"); + ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result"); + + ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value"); + ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result"); + + ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value"); +} + +static void test_xor(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.xor.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value"); + ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result"); + + ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value"); + ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result"); + + ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value"); +} + +static void test_cmpxchg(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.cmpxchg.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value"); + ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); + ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg_result_succeed"); + + ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value"); + ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail"); + ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); +} + +static void test_xchg(struct atomics_lskel *skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* No need to attach it, just run it directly */ + prog_fd = skel->progs.xchg.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + 
ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value"); + ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result"); + + ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value"); + ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result"); +} + +void test_atomics(void) +{ + struct atomics_lskel *skel; + + skel = atomics_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "atomics skeleton load")) + return; + + if (skel->data->skip_tests) { + printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)", + __func__); + test__skip(); + goto cleanup; + } + skel->bss->pid = getpid(); + + if (test__start_subtest("add")) + test_add(skel); + if (test__start_subtest("sub")) + test_sub(skel); + if (test__start_subtest("and")) + test_and(skel); + if (test__start_subtest("or")) + test_or(skel); + if (test__start_subtest("xor")) + test_xor(skel); + if (test__start_subtest("cmpxchg")) + test_cmpxchg(skel); + if (test__start_subtest("xchg")) + test_xchg(skel); + +cleanup: + atomics_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c new file mode 100644 index 000000000..9566d9d2f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_attach_probe.skel.h" + +/* this is how USDT semaphore is actually defined, except volatile modifier */ +volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes"))); + +/* uprobe attach point */ +static noinline void trigger_func(void) +{ + asm volatile (""); +} + +/* attach point for byname uprobe */ +static noinline void trigger_func2(void) +{ + asm volatile (""); +} + +/* attach point for byname sleepable uprobe */ +static noinline void trigger_func3(void) +{ + asm volatile (""); +} + +static char test_data[] = "test_data"; + +void test_attach_probe(void) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + struct bpf_link *kprobe_link, *kretprobe_link; + struct bpf_link *uprobe_link, *uretprobe_link; + struct test_attach_probe* skel; + ssize_t uprobe_offset, ref_ctr_offset; + struct bpf_link *uprobe_err_link; + bool legacy; + char *mem; + + /* Check if new-style kprobe/uprobe API is supported. + * Kernels that support new FD-based kprobe and uprobe BPF attachment + * through perf_event_open() syscall expose + * /sys/bus/event_source/devices/kprobe/type and + * /sys/bus/event_source/devices/uprobe/type files, respectively. They + * contain magic numbers that are passed as "type" field of + * perf_event_attr. Lack of such file in the system indicates legacy + * kernel with old-style kprobe/uprobe attach interface through + * creating per-probe event through tracefs. For such cases + * ref_ctr_offset feature is not supported, so we don't test it. 
+ */ + legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; + + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + return; + + ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); + if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) + return; + + skel = test_attach_probe__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + /* sleepable kprobe test case needs flags set before loading */ + if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, + BPF_F_SLEEPABLE), "kprobe_sleepable_flags")) + goto cleanup; + + if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) + goto cleanup; + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) + goto cleanup; + + /* manual-attach kprobe/kretprobe */ + kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, + false /* retprobe */, + SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe")) + goto cleanup; + skel->links.handle_kprobe = kprobe_link; + + kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe, + true /* retprobe */, + SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe")) + goto cleanup; + skel->links.handle_kretprobe = kretprobe_link; + + /* auto-attachable kprobe and kretprobe */ + skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); + + skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); + + if (!legacy) + ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); + + uprobe_opts.retprobe = false; + uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset; + uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, + 0 /* self pid */, + "/proc/self/exe", + uprobe_offset, + &uprobe_opts); + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe")) + goto cleanup; + skel->links.handle_uprobe = uprobe_link; + + if (!legacy) + ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); + + /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ + uprobe_opts.retprobe = true; + uprobe_opts.ref_ctr_offset = legacy ? 
0 : ref_ctr_offset; + uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, + -1 /* any pid */, + "/proc/self/exe", + uprobe_offset, &uprobe_opts); + if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe")) + goto cleanup; + skel->links.handle_uretprobe = uretprobe_link; + + /* verify auto-attach fails for old-style uprobe definition */ + uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); + if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, + "auto-attach should fail for old-style name")) + goto cleanup; + + uprobe_opts.func_name = "trigger_func2"; + uprobe_opts.retprobe = false; + uprobe_opts.ref_ctr_offset = 0; + skel->links.handle_uprobe_byname = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname, + 0 /* this pid */, + "/proc/self/exe", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname")) + goto cleanup; + + /* verify auto-attach works */ + skel->links.handle_uretprobe_byname = + bpf_program__attach(skel->progs.handle_uretprobe_byname); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname")) + goto cleanup; + + /* test attach by name for a library function, using the library + * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo(). + */ + uprobe_opts.func_name = "malloc"; + uprobe_opts.retprobe = false; + skel->links.handle_uprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2, + 0 /* this pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2")) + goto cleanup; + + uprobe_opts.func_name = "free"; + uprobe_opts.retprobe = true; + skel->links.handle_uretprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2, + -1 /* any pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2")) + goto cleanup; + + /* sleepable kprobes should not attach successfully */ + skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable); + if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable")) + goto cleanup; + + /* test sleepable uprobe and uretprobe variants */ + skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable")) + goto cleanup; + + skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3")) + goto cleanup; + + skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable")) + goto cleanup; + + skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3")) + goto cleanup; + + skel->bss->user_ptr = test_data; + + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(1); + free(mem); + + /* trigger & validate uprobe & uretprobe */ + trigger_func(); + + /* trigger & validate uprobe attached by name */ + trigger_func2(); + + /* trigger & 
validate sleepable uprobe attached by name */ + trigger_func3(); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); + ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); + ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); + ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); + ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); + ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); + ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); + ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res"); + ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res"); + ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res"); + ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res"); + +cleanup: + test_attach_probe__destroy(skel); + ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/autoattach.c b/tools/testing/selftests/bpf/prog_tests/autoattach.c new file mode 100644 index 000000000..dc5e01d27 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/autoattach.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ + +#include <test_progs.h> +#include "test_autoattach.skel.h" + +void test_autoattach(void) +{ + struct test_autoattach *skel; + + skel = test_autoattach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto cleanup; + + /* disable auto-attach for prog2 */ + bpf_program__set_autoattach(skel->progs.prog2, false); + ASSERT_TRUE(bpf_program__autoattach(skel->progs.prog1), "autoattach_prog1"); + ASSERT_FALSE(bpf_program__autoattach(skel->progs.prog2), "autoattach_prog2"); + if (!ASSERT_OK(test_autoattach__attach(skel), "skel_attach")) + goto cleanup; + + usleep(1); + + ASSERT_TRUE(skel->bss->prog1_called, "attached_prog1"); + ASSERT_FALSE(skel->bss->prog2_called, "attached_prog2"); + +cleanup: + test_autoattach__destroy(skel); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/autoload.c b/tools/testing/selftests/bpf/prog_tests/autoload.c new file mode 100644 index 000000000..3693f7d13 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/autoload.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <time.h> +#include "test_autoload.skel.h" + +void test_autoload(void) +{ + int duration = 0, err; + struct test_autoload* skel; + + skel = test_autoload__open_and_load(); + /* prog3 should be broken */ + if (CHECK(skel, "skel_open_and_load", "unexpected success\n")) + goto cleanup; + + skel = test_autoload__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + goto cleanup; + + /* don't load prog3 */ + bpf_program__set_autoload(skel->progs.prog3, false); + + err = test_autoload__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + err = test_autoload__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(!skel->bss->prog1_called, "prog1", "not called\n"); + CHECK(!skel->bss->prog2_called, "prog2", "not called\n"); + 
CHECK(skel->bss->prog3_called, "prog3", "called?!\n"); + +cleanup: + test_autoload__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c new file mode 100644 index 000000000..a1766a298 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/socket.h> + +#include "test_progs.h" +#include "cap_helpers.h" +#include "bind_perm.skel.h" + +static int duration; + +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + return 0; +} + +void try_bind(int family, int port, int expected_errno) +{ + struct sockaddr_storage addr = {}; + struct sockaddr_in6 *sin6; + struct sockaddr_in *sin; + int fd = -1; + + fd = socket(family, SOCK_STREAM, 0); + if (CHECK(fd < 0, "fd", "errno %d", errno)) + goto close_socket; + + if (family == AF_INET) { + sin = (struct sockaddr_in *)&addr; + sin->sin_family = family; + sin->sin_port = htons(port); + } else { + sin6 = (struct sockaddr_in6 *)&addr; + sin6->sin6_family = family; + sin6->sin6_port = htons(port); + } + + errno = 0; + bind(fd, (struct sockaddr *)&addr, sizeof(addr)); + ASSERT_EQ(errno, expected_errno, "bind"); + +close_socket: + if (fd >= 0) + close(fd); +} + +void test_bind_perm(void) +{ + const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE; + struct bind_perm *skel; + __u64 old_caps = 0; + int cgroup_fd; + + if (create_netns()) + return; + + cgroup_fd = test__join_cgroup("/bind_perm"); + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + return; + + skel = bind_perm__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto close_cgroup_fd; + + skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel, "bind_v4_prog")) + goto close_skeleton; + + skel->links.bind_v6_prog = bpf_program__attach_cgroup(skel->progs.bind_v6_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel, "bind_v6_prog")) + goto close_skeleton; + + ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps), + "cap_disable_effective"); + + try_bind(AF_INET, 110, EACCES); + try_bind(AF_INET6, 110, EACCES); + + try_bind(AF_INET, 111, 0); + try_bind(AF_INET6, 111, 0); + + if (old_caps & net_bind_svc_cap) + ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL), + "cap_enable_effective"); + +close_skeleton: + bind_perm__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c new file mode 100644 index 000000000..d2d9e965e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <sys/syscall.h> +#include <test_progs.h> +#include "bloom_filter_map.skel.h" + +static void test_fail_cases(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + __u32 value; + int fd, err; + + /* Invalid key size */ + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 4, sizeof(value), 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid key size")) + close(fd); + + /* Invalid value size */ + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, 0, 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0")) + close(fd); + + /* Invalid max entries size 
*/ + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size")) + close(fd); + + /* Bloom filter maps do not support BPF_F_NO_PREALLOC */ + opts.map_flags = BPF_F_NO_PREALLOC; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid flags")) + close(fd); + + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, NULL); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter")) + return; + + /* Test invalid flags */ + err = bpf_map_update_elem(fd, NULL, &value, -1); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_EXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_F_LOCK); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, BPF_NOEXIST); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + err = bpf_map_update_elem(fd, NULL, &value, 10000); + ASSERT_EQ(err, -EINVAL, "bpf_map_update_elem bloom filter invalid flags"); + + close(fd); +} + +static void test_success_cases(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + char value[11]; + int fd, err; + + /* Create a map */ + opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); + if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) + return; + + /* Add a value to the bloom filter */ + err = bpf_map_update_elem(fd, NULL, &value, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem bloom filter success case")) + goto done; + + /* Lookup a value in the bloom filter */ + err = bpf_map_lookup_elem(fd, NULL, &value); + ASSERT_OK(err, "bpf_map_update_elem bloom filter success case"); + +done: + close(fd); +} + +static void check_bloom(struct bloom_filter_map *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.check_bloom); + if (!ASSERT_OK_PTR(link, "link")) + return; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); +} + +static void test_inner_map(struct bloom_filter_map *skel, const __u32 *rand_vals, + __u32 nr_rand_vals) +{ + int outer_map_fd, inner_map_fd, err, i, key = 0; + struct bpf_link *link; + + /* Create a bloom filter map that will be used as the inner map */ + inner_map_fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(*rand_vals), + nr_rand_vals, NULL); + if (!ASSERT_GE(inner_map_fd, 0, "bpf_map_create bloom filter inner map")) + return; + + for (i = 0; i < nr_rand_vals; i++) { + err = bpf_map_update_elem(inner_map_fd, NULL, rand_vals + i, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to inner_map_fd")) + goto done; + } + + /* Add the bloom filter map to the outer map */ + outer_map_fd = bpf_map__fd(skel->maps.outer_map); + err = bpf_map_update_elem(outer_map_fd, &key, &inner_map_fd, BPF_ANY); + if (!ASSERT_OK(err, "Add bloom filter map to outer map")) + goto done; + + /* Attach the bloom_filter_inner_map prog */ + link = bpf_program__attach(skel->progs.inner_map); + if (!ASSERT_OK_PTR(link, "link")) + goto delete_inner_map; + + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->error, 0, "error"); + + bpf_link__destroy(link); + +delete_inner_map: + /* Ensure the inner bloom filter map 
can be deleted */ + err = bpf_map_delete_elem(outer_map_fd, &key); + ASSERT_OK(err, "Delete inner bloom filter map"); + +done: + close(inner_map_fd); +} + +static int setup_progs(struct bloom_filter_map **out_skel, __u32 **out_rand_vals, + __u32 *out_nr_rand_vals) +{ + struct bloom_filter_map *skel; + int random_data_fd, bloom_fd; + __u32 *rand_vals = NULL; + __u32 map_size, val; + int err, i; + + /* Set up a bloom filter map skeleton */ + skel = bloom_filter_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bloom_filter_map__open_and_load")) + return -EINVAL; + + /* Set up rand_vals */ + map_size = bpf_map__max_entries(skel->maps.map_random_data); + rand_vals = malloc(sizeof(*rand_vals) * map_size); + if (!rand_vals) { + err = -ENOMEM; + goto error; + } + + /* Generate random values and populate both skeletons */ + random_data_fd = bpf_map__fd(skel->maps.map_random_data); + bloom_fd = bpf_map__fd(skel->maps.map_bloom); + for (i = 0; i < map_size; i++) { + val = rand(); + + err = bpf_map_update_elem(random_data_fd, &i, &val, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to map_random_data")) + goto error; + + err = bpf_map_update_elem(bloom_fd, NULL, &val, BPF_ANY); + if (!ASSERT_OK(err, "Add random value to map_bloom")) + goto error; + + rand_vals[i] = val; + } + + *out_skel = skel; + *out_rand_vals = rand_vals; + *out_nr_rand_vals = map_size; + + return 0; + +error: + bloom_filter_map__destroy(skel); + if (rand_vals) + free(rand_vals); + return err; +} + +void test_bloom_filter_map(void) +{ + __u32 *rand_vals, nr_rand_vals; + struct bloom_filter_map *skel; + int err; + + test_fail_cases(); + test_success_cases(); + + err = setup_progs(&skel, &rand_vals, &nr_rand_vals); + if (err) + return; + + test_inner_map(skel, rand_vals, nr_rand_vals); + free(rand_vals); + + check_bloom(skel); + + bloom_filter_map__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c new file mode 100644 index 000000000..2be2d6195 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -0,0 +1,526 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/syscall.h> +#include <sys/mman.h> +#include <unistd.h> +#include <test_progs.h> +#include <network_helpers.h> +#include <bpf/btf.h> +#include "test_bpf_cookie.skel.h" +#include "kprobe_multi.skel.h" + +/* uprobe attach point */ +static noinline void trigger_func(void) +{ + asm volatile (""); +} + +static void kprobe_subtest(struct test_bpf_cookie *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); + struct bpf_link *link1 = NULL, *link2 = NULL; + struct bpf_link *retlink1 = NULL, *retlink2 = NULL; + + /* attach two kprobes */ + opts.bpf_cookie = 0x1; + opts.retprobe = false; + link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + SYS_NANOSLEEP_KPROBE_NAME, &opts); + if (!ASSERT_OK_PTR(link1, "link1")) + goto cleanup; + + opts.bpf_cookie = 0x2; + opts.retprobe = false; + link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + SYS_NANOSLEEP_KPROBE_NAME, &opts); + if (!ASSERT_OK_PTR(link2, "link2")) + goto cleanup; + + /* attach two kretprobes */ + opts.bpf_cookie = 0x10; + opts.retprobe = true; + retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + SYS_NANOSLEEP_KPROBE_NAME, &opts); + if (!ASSERT_OK_PTR(retlink1, "retlink1")) + goto cleanup; + + opts.bpf_cookie = 0x20; + opts.retprobe = true; + retlink2 = 
bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + SYS_NANOSLEEP_KPROBE_NAME, &opts); + if (!ASSERT_OK_PTR(retlink2, "retlink2")) + goto cleanup; + + /* trigger kprobe && kretprobe */ + usleep(1); + + ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res"); + +cleanup: + bpf_link__destroy(link1); + bpf_link__destroy(link2); + bpf_link__destroy(retlink1); + bpf_link__destroy(retlink2); +} + +static void kprobe_multi_test_run(struct kprobe_multi *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +} + +static void kprobe_multi_link_api_subtest(void) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + __u64 cookies[8]; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + goto cleanup; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \ + goto cleanup; \ +}) + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test3", addrs[1]); + GET_ADDR("bpf_fentry_test4", addrs[2]); + GET_ADDR("bpf_fentry_test5", addrs[3]); + GET_ADDR("bpf_fentry_test6", addrs[4]); + GET_ADDR("bpf_fentry_test7", addrs[5]); + GET_ADDR("bpf_fentry_test2", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + +#undef GET_ADDR + + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ + + opts.kprobe_multi.addrs = (const unsigned long *) &addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + opts.kprobe_multi.cookies = (const __u64 *) &cookies; + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + + link1_fd = bpf_link_create(prog_fd, 0, 
BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + cookies[7] = 1; /* bpf_fentry_test8 */ + + opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + close(link1_fd); + close(link2_fd); + kprobe_multi__destroy(skel); +} + +static void kprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct kprobe_multi *skel = NULL; + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test2", + "bpf_fentry_test8", + }; + __u64 cookies[8]; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = cookies; + + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + cookies[7] = 1; /* bpf_fentry_test8 */ + + opts.retprobe = true; + + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} +static void uprobe_subtest(struct test_bpf_cookie *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); + struct bpf_link *link1 = NULL, *link2 = NULL; + struct bpf_link *retlink1 = NULL, *retlink2 = NULL; + ssize_t uprobe_offset; + + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + goto cleanup; + + /* attach two uprobes */ + opts.bpf_cookie = 0x100; + opts.retprobe = false; + link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */, + "/proc/self/exe", uprobe_offset, &opts); + if (!ASSERT_OK_PTR(link1, "link1")) + goto cleanup; + + opts.bpf_cookie = 0x200; + opts.retprobe = false; + link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */, + "/proc/self/exe", uprobe_offset, &opts); + if (!ASSERT_OK_PTR(link2, "link2")) + goto cleanup; + + /* attach two 
uretprobes */ + opts.bpf_cookie = 0x1000; + opts.retprobe = true; + retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */, + "/proc/self/exe", uprobe_offset, &opts); + if (!ASSERT_OK_PTR(retlink1, "retlink1")) + goto cleanup; + + opts.bpf_cookie = 0x2000; + opts.retprobe = true; + retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */, + "/proc/self/exe", uprobe_offset, &opts); + if (!ASSERT_OK_PTR(retlink2, "retlink2")) + goto cleanup; + + /* trigger uprobe && uretprobe */ + trigger_func(); + + ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res"); + ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res"); + +cleanup: + bpf_link__destroy(link1); + bpf_link__destroy(link2); + bpf_link__destroy(retlink1); + bpf_link__destroy(retlink2); +} + +static void tp_subtest(struct test_bpf_cookie *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts); + struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL; + + /* attach first tp prog */ + opts.bpf_cookie = 0x10000; + link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1, + "syscalls", "sys_enter_nanosleep", &opts); + if (!ASSERT_OK_PTR(link1, "link1")) + goto cleanup; + + /* attach second tp prog */ + opts.bpf_cookie = 0x20000; + link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2, + "syscalls", "sys_enter_nanosleep", &opts); + if (!ASSERT_OK_PTR(link2, "link2")) + goto cleanup; + + /* trigger tracepoints */ + usleep(1); + + ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1"); + + /* now we detach first prog and will attach third one, which causes + * two internal calls to bpf_prog_array_copy(), shuffling + * bpf_prog_array_items around. We test here that we don't lose track + * of associated bpf_cookies. 
+ */ + bpf_link__destroy(link1); + link1 = NULL; + kern_sync_rcu(); + skel->bss->tp_res = 0; + + /* attach third tp prog */ + opts.bpf_cookie = 0x40000; + link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3, + "syscalls", "sys_enter_nanosleep", &opts); + if (!ASSERT_OK_PTR(link3, "link3")) + goto cleanup; + + /* trigger tracepoints */ + usleep(1); + + ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2"); + +cleanup: + bpf_link__destroy(link1); + bpf_link__destroy(link2); + bpf_link__destroy(link3); +} + +static void burn_cpu(void) +{ + volatile int j = 0; + cpu_set_t cpu_set; + int i, err; + + /* generate some branches on cpu 0 */ + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + ASSERT_OK(err, "set_thread_affinity"); + + /* spin the loop for a while (random high number) */ + for (i = 0; i < 1000000; ++i) + ++j; +} + +static void pe_subtest(struct test_bpf_cookie *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts); + struct bpf_link *link = NULL; + struct perf_event_attr attr; + int pfd = -1; + + /* create perf event */ + memset(&attr, 0, sizeof(attr)); + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (!ASSERT_GE(pfd, 0, "perf_fd")) + goto cleanup; + + opts.bpf_cookie = 0x100000; + link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); + if (!ASSERT_OK_PTR(link, "link1")) + goto cleanup; + + burn_cpu(); /* trigger BPF prog */ + + ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1"); + + /* prevent bpf_link__destroy() closing pfd itself */ + bpf_link__disconnect(link); + /* close BPF link's FD explicitly */ + close(bpf_link__fd(link)); + /* free up memory used by struct bpf_link */ + bpf_link__destroy(link); + link = NULL; + kern_sync_rcu(); + skel->bss->pe_res = 0; + + opts.bpf_cookie = 0x200000; + link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts); + if (!ASSERT_OK_PTR(link, "link2")) + goto cleanup; + + burn_cpu(); /* trigger BPF prog */ + + ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2"); + +cleanup: + close(pfd); + bpf_link__destroy(link); +} + +static void tracing_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1; + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->fentry_res = 0; + skel->bss->fexit_res = 0; + + cookie = 0x10000000000000L; + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + link_opts.tracing.cookie = cookie; + fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts); + if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create")) + goto cleanup; + + cookie = 0x20000000000000L; + prog_fd = bpf_program__fd(skel->progs.fexit_test1); + link_opts.tracing.cookie = cookie; + fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts); + if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create")) + goto cleanup; + + cookie = 0x30000000000000L; + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + link_opts.tracing.cookie = cookie; + fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts); + if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + bpf_prog_test_run_opts(prog_fd, &opts); + + prog_fd = 
bpf_program__fd(skel->progs.fmod_ret_test); + bpf_prog_test_run_opts(prog_fd, &opts); + + ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res"); + ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res"); + ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res"); + +cleanup: + if (fentry_fd >= 0) + close(fentry_fd); + if (fexit_fd >= 0) + close(fexit_fd); + if (fmod_ret_fd >= 0) + close(fmod_ret_fd); +} + +int stack_mprotect(void); + +static void lsm_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int lsm_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->lsm_res = 0; + + cookie = 0x90000000000090L; + prog_fd = bpf_program__fd(skel->progs.test_int_hook); + link_opts.tracing.cookie = cookie; + lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts); + if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create")) + goto cleanup; + + stack_mprotect(); + if (!ASSERT_EQ(errno, EPERM, "stack_mprotect")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res"); + +cleanup: + if (lsm_fd >= 0) + close(lsm_fd); +} + +void test_bpf_cookie(void) +{ + struct test_bpf_cookie *skel; + + skel = test_bpf_cookie__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->my_tid = syscall(SYS_gettid); + + if (test__start_subtest("kprobe")) + kprobe_subtest(skel); + if (test__start_subtest("multi_kprobe_link_api")) + kprobe_multi_link_api_subtest(); + if (test__start_subtest("multi_kprobe_attach_api")) + kprobe_multi_attach_api_subtest(); + if (test__start_subtest("uprobe")) + uprobe_subtest(skel); + if (test__start_subtest("tracepoint")) + tp_subtest(skel); + if (test__start_subtest("perf_event")) + pe_subtest(skel); + if (test__start_subtest("trampoline")) + tracing_subtest(skel); + if (test__start_subtest("lsm")) + lsm_subtest(skel); + + test_bpf_cookie__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c new file mode 100644 index 000000000..0c67a4e28 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -0,0 +1,1641 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include <unistd.h> +#include <sys/syscall.h> +#include "bpf_iter_ipv6_route.skel.h" +#include "bpf_iter_netlink.skel.h" +#include "bpf_iter_bpf_map.skel.h" +#include "bpf_iter_task.skel.h" +#include "bpf_iter_task_stack.skel.h" +#include "bpf_iter_task_file.skel.h" +#include "bpf_iter_task_vma.skel.h" +#include "bpf_iter_task_btf.skel.h" +#include "bpf_iter_tcp4.skel.h" +#include "bpf_iter_tcp6.skel.h" +#include "bpf_iter_udp4.skel.h" +#include "bpf_iter_udp6.skel.h" +#include "bpf_iter_unix.skel.h" +#include "bpf_iter_vma_offset.skel.h" +#include "bpf_iter_test_kern1.skel.h" +#include "bpf_iter_test_kern2.skel.h" +#include "bpf_iter_test_kern3.skel.h" +#include "bpf_iter_test_kern4.skel.h" +#include "bpf_iter_bpf_hash_map.skel.h" +#include "bpf_iter_bpf_percpu_hash_map.skel.h" +#include "bpf_iter_bpf_array_map.skel.h" +#include "bpf_iter_bpf_percpu_array_map.skel.h" +#include "bpf_iter_bpf_sk_storage_helpers.skel.h" +#include "bpf_iter_bpf_sk_storage_map.skel.h" +#include "bpf_iter_test_kern5.skel.h" +#include "bpf_iter_test_kern6.skel.h" +#include "bpf_iter_bpf_link.skel.h" +#include "bpf_iter_ksym.skel.h" +#include "bpf_iter_sockmap.skel.h" + +static int duration; + +static void test_btf_id_or_null(void) +{ + struct bpf_iter_test_kern3 *skel; + + skel = 
bpf_iter_test_kern3__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { + bpf_iter_test_kern3__destroy(skel); + return; + } +} + +static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts) +{ + struct bpf_link *link; + char buf[16] = {}; + int iter_fd, len; + + link = bpf_program__attach_iter(prog, opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* not check contents, but ensure read() ends without error */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); + + close(iter_fd); + +free_link: + bpf_link__destroy(link); +} + +static void do_dummy_read(struct bpf_program *prog) +{ + do_dummy_read_opts(prog, NULL); +} + +static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog, + struct bpf_map *map) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + char buf[16] = {}; + int iter_fd, len; + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = bpf_map__fd(map); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(prog, &opts); + if (!ASSERT_OK_PTR(link, "attach_map_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) { + bpf_link__destroy(link); + return; + } + + /* Close link and map fd prematurely */ + bpf_link__destroy(link); + bpf_object__destroy_skeleton(*skel); + *skel = NULL; + + /* Try to let map free work to run first if map is freed */ + usleep(100); + /* Memory used by both sock map and sock local storage map are + * freed after two synchronize_rcu() calls, so wait for it + */ + kern_sync_rcu(); + kern_sync_rcu(); + + /* Read after both map fd and link fd are closed */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + ASSERT_GE(len, 0, "read_iterator"); + + close(iter_fd); +} + +static int read_fd_into_buffer(int fd, char *buf, int size) +{ + int bufleft = size; + int len; + + do { + len = read(fd, buf, bufleft); + if (len > 0) { + buf += len; + bufleft -= len; + } + } while (len > 0); + + return len < 0 ? 
len : size - bufleft; +} + +static void test_ipv6_route(void) +{ + struct bpf_iter_ipv6_route *skel; + + skel = bpf_iter_ipv6_route__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_ipv6_route); + + bpf_iter_ipv6_route__destroy(skel); +} + +static void test_netlink(void) +{ + struct bpf_iter_netlink *skel; + + skel = bpf_iter_netlink__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_netlink); + + bpf_iter_netlink__destroy(skel); +} + +static void test_bpf_map(void) +{ + struct bpf_iter_bpf_map *skel; + + skel = bpf_iter_bpf_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_bpf_map); + + bpf_iter_bpf_map__destroy(skel); +} + +static int pidfd_open(pid_t pid, unsigned int flags) +{ + return syscall(SYS_pidfd_open, pid, flags); +} + +static void check_bpf_link_info(const struct bpf_program *prog) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link_info info = {}; + struct bpf_link *link; + __u32 info_len; + int err; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.tid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(prog, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len); + ASSERT_OK(err, "bpf_obj_get_info_by_fd"); + ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid"); + + bpf_link__destroy(link); +} + +static pthread_mutex_t do_nothing_mutex; + +static void *do_nothing_wait(void *arg) +{ + pthread_mutex_lock(&do_nothing_mutex); + pthread_mutex_unlock(&do_nothing_mutex); + + pthread_exit(arg); +} + +static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts, + int *num_unknown, int *num_known) +{ + struct bpf_iter_task *skel; + pthread_t thread_id; + void *ret; + + skel = bpf_iter_task__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) + return; + + ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"); + + ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL), + "pthread_create"); + + skel->bss->tid = getpid(); + + do_dummy_read_opts(skel->progs.dump_task, opts); + + *num_unknown = skel->bss->num_unknown_tid; + *num_known = skel->bss->num_known_tid; + + ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock"); + ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, + "pthread_join"); + + bpf_iter_task__destroy(skel); +} + +static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known) +{ + int num_unknown_tid, num_known_tid; + + test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid); + ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid"); + ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid"); +} + +static void test_task_tid(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + int num_unknown_tid, num_known_tid; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.tid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + test_task_common(&opts, 0, 1); + + linfo.task.tid = 0; + linfo.task.pid = getpid(); + test_task_common(&opts, 1, 1); + + test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid); + 
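/* with no tid/pid filter, the iterator visits every task in the system, so + * more than one unknown tid is expected while only our own tid is known. + */ + 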
ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid"); + ASSERT_EQ(num_known_tid, 1, "check_num_known_tid"); +} + +static void test_task_pid(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.pid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + test_task_common(&opts, 1, 1); +} + +static void test_task_pidfd(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + int pidfd; + + pidfd = pidfd_open(getpid(), 0); + if (!ASSERT_GT(pidfd, 0, "pidfd_open")) + return; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.pid_fd = pidfd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + test_task_common(&opts, 1, 1); + + close(pidfd); +} + +static void test_task_sleepable(void) +{ + struct bpf_iter_task *skel; + + skel = bpf_iter_task__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_task_sleepable); + + ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0, + "num_expected_failure_copy_from_user_task"); + ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, + "num_success_copy_from_user_task"); + + bpf_iter_task__destroy(skel); +} + +static void test_task_stack(void) +{ + struct bpf_iter_task_stack *skel; + + skel = bpf_iter_task_stack__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_task_stack); + do_dummy_read(skel->progs.get_task_user_stacks); + + ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks"); + + bpf_iter_task_stack__destroy(skel); +} + +static void test_task_file(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_task_file *skel; + union bpf_iter_link_info linfo; + pthread_t thread_id; + void *ret; + + skel = bpf_iter_task_file__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load")) + return; + + skel->bss->tgid = getpid(); + + ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"); + + ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL), + "pthread_create"); + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.tid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + do_dummy_read_opts(skel->progs.dump_task_file, &opts); + + ASSERT_EQ(skel->bss->count, 0, "check_count"); + ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count"); + + skel->bss->last_tgid = 0; + skel->bss->count = 0; + skel->bss->unique_tgid_count = 0; + + do_dummy_read(skel->progs.dump_task_file); + + ASSERT_EQ(skel->bss->count, 0, "check_count"); + ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count"); + + check_bpf_link_info(skel->progs.dump_task_file); + + ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock"); + ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join"); + ASSERT_NULL(ret, "pthread_join"); + + bpf_iter_task_file__destroy(skel); +} + +#define TASKBUFSZ 32768 + +static char taskbuf[TASKBUFSZ]; + +static int do_btf_read(struct bpf_iter_task_btf *skel) +{ + struct bpf_program *prog = skel->progs.dump_task_struct; + struct bpf_iter_task_btf__bss *bss = skel->bss; + int iter_fd = -1, err; + struct bpf_link *link; + char *buf = taskbuf; + int ret = 0; + + link = bpf_program__attach_iter(prog, NULL); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return ret; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if 
(!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); + if (bss->skip) { + printf("%s:SKIP:no __builtin_btf_type_id\n", __func__); + ret = 1; + test__skip(); + goto free_link; + } + + if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) + goto free_link; + + ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", + "check for btf representation of task_struct in iter data"); +free_link: + if (iter_fd > 0) + close(iter_fd); + bpf_link__destroy(link); + return ret; +} + +static void test_task_btf(void) +{ + struct bpf_iter_task_btf__bss *bss; + struct bpf_iter_task_btf *skel; + int ret; + + skel = bpf_iter_task_btf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load")) + return; + + bss = skel->bss; + + ret = do_btf_read(skel); + if (ret) + goto cleanup; + + if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?")) + goto cleanup; + + ASSERT_EQ(bss->seq_err, 0, "check for unexpected err"); + +cleanup: + bpf_iter_task_btf__destroy(skel); +} + +static void test_tcp4(void) +{ + struct bpf_iter_tcp4 *skel; + + skel = bpf_iter_tcp4__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_tcp4); + + bpf_iter_tcp4__destroy(skel); +} + +static void test_tcp6(void) +{ + struct bpf_iter_tcp6 *skel; + + skel = bpf_iter_tcp6__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_tcp6); + + bpf_iter_tcp6__destroy(skel); +} + +static void test_udp4(void) +{ + struct bpf_iter_udp4 *skel; + + skel = bpf_iter_udp4__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_udp4); + + bpf_iter_udp4__destroy(skel); +} + +static void test_udp6(void) +{ + struct bpf_iter_udp6 *skel; + + skel = bpf_iter_udp6__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_udp6); + + bpf_iter_udp6__destroy(skel); +} + +static void test_unix(void) +{ + struct bpf_iter_unix *skel; + + skel = bpf_iter_unix__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_unix); + + bpf_iter_unix__destroy(skel); +} + +/* The expected string is less than 16 bytes */ +static int do_read_with_fd(int iter_fd, const char *expected, + bool read_one_char) +{ + int len, read_buf_len, start; + char buf[16] = {}; + + read_buf_len = read_one_char ? 1 : 16; + start = 0; + while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { + start += len; + if (CHECK(start >= 16, "read", "read len %d\n", len)) + return -1; + read_buf_len = read_one_char ? 
1 : 16 - start; + } + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + return -1; + + if (!ASSERT_STREQ(buf, expected, "read")) + return -1; + + return 0; +} + +static void test_anon_iter(bool read_one_char) +{ + struct bpf_iter_test_kern1 *skel; + struct bpf_link *link; + int iter_fd, err; + + skel = bpf_iter_test_kern1__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load")) + return; + + err = bpf_iter_test_kern1__attach(skel); + if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) { + goto out; + } + + link = skel->links.dump_task; + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto out; + + do_read_with_fd(iter_fd, "abcd", read_one_char); + close(iter_fd); + +out: + bpf_iter_test_kern1__destroy(skel); +} + +static int do_read(const char *path, const char *expected) +{ + int err, iter_fd; + + iter_fd = open(path, O_RDONLY); + if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", + path, strerror(errno))) + return -1; + + err = do_read_with_fd(iter_fd, expected, false); + close(iter_fd); + return err; +} + +static void test_file_iter(void) +{ + const char *path = "/sys/fs/bpf/bpf_iter_test1"; + struct bpf_iter_test_kern1 *skel1; + struct bpf_iter_test_kern2 *skel2; + struct bpf_link *link; + int err; + + skel1 = bpf_iter_test_kern1__open_and_load(); + if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load")) + return; + + link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + /* unlink this path if it exists. */ + unlink(path); + + err = bpf_link__pin(link, path); + if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) + goto free_link; + + err = do_read(path, "abcd"); + if (err) + goto unlink_path; + + /* The file-based iterator seems to be working fine. Let us do a link + * update of the underlying link and `cat` the iterator again; its + * content should change. + */ + skel2 = bpf_iter_test_kern2__open_and_load(); + if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load")) + goto unlink_path; + + err = bpf_link__update_program(link, skel2->progs.dump_task); + if (!ASSERT_OK(err, "update_prog")) + goto destroy_skel2; + + do_read(path, "ABCD"); + +destroy_skel2: + bpf_iter_test_kern2__destroy(skel2); +unlink_path: + unlink(path); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_test_kern1__destroy(skel1); +} + +static void test_overflow(bool test_e2big_overflow, bool ret1) +{ + __u32 map_info_len, total_read_len, expected_read_len; + int err, iter_fd, map1_fd, map2_fd, len; + struct bpf_map_info map_info = {}; + struct bpf_iter_test_kern4 *skel; + struct bpf_link *link; + __u32 iter_size; + char *buf; + + skel = bpf_iter_test_kern4__open(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open")) + return; + + /* create two maps: the bpf program will only do bpf_seq_write + * for these two maps. The goal is that one map's output almost + * fills the seq_file buffer and the other then triggers an + * overflow that needs a restart. 
+ */ + map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map1_fd < 0, "bpf_map_create", + "map_creation failed: %s\n", strerror(errno))) + goto out; + map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (CHECK(map2_fd < 0, "bpf_map_create", + "map_creation failed: %s\n", strerror(errno))) + goto free_map1; + + /* bpf_seq_printf kernel buffer is 8 pages, so one map + * bpf_seq_write will mostly fill it, and the other map + * will partially fill and then trigger overflow and need + * bpf_seq_read restart. + */ + iter_size = sysconf(_SC_PAGE_SIZE) << 3; + + if (test_e2big_overflow) { + skel->rodata->print_len = (iter_size + 8) / 8; + expected_read_len = 2 * (iter_size + 8); + } else if (!ret1) { + skel->rodata->print_len = (iter_size - 8) / 8; + expected_read_len = 2 * (iter_size - 8); + } else { + skel->rodata->print_len = 1; + expected_read_len = 2 * 8; + } + skel->rodata->ret1 = ret1; + + if (!ASSERT_OK(bpf_iter_test_kern4__load(skel), + "bpf_iter_test_kern4__load")) + goto free_map2; + + /* setup filtering map_id in bpf program */ + map_info_len = sizeof(map_info); + err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); + if (CHECK(err, "get_map_info", "get map info failed: %s\n", + strerror(errno))) + goto free_map2; + skel->bss->map1_id = map_info.id; + + err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); + if (CHECK(err, "get_map_info", "get map info failed: %s\n", + strerror(errno))) + goto free_map2; + skel->bss->map2_id = map_info.id; + + link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto free_map2; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + buf = malloc(expected_read_len); + if (!buf) + goto close_iter; + + /* do read */ + total_read_len = 0; + if (test_e2big_overflow) { + while ((len = read(iter_fd, buf, expected_read_len)) > 0) + total_read_len += len; + + CHECK(len != -1 || errno != E2BIG, "read", + "expected ret -1, errno E2BIG, but get ret %d, error %s\n", + len, strerror(errno)); + goto free_buf; + } else if (!ret1) { + while ((len = read(iter_fd, buf, expected_read_len)) > 0) + total_read_len += len; + + if (CHECK(len < 0, "read", "read failed: %s\n", + strerror(errno))) + goto free_buf; + } else { + do { + len = read(iter_fd, buf, expected_read_len); + if (len > 0) + total_read_len += len; + } while (len > 0 || len == -EAGAIN); + + if (CHECK(len < 0, "read", "read failed: %s\n", + strerror(errno))) + goto free_buf; + } + + if (!ASSERT_EQ(total_read_len, expected_read_len, "read")) + goto free_buf; + + if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed")) + goto free_buf; + + if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed")) + goto free_buf; + + ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum"); + +free_buf: + free(buf); +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +free_map2: + close(map2_fd); +free_map1: + close(map1_fd); +out: + bpf_iter_test_kern4__destroy(skel); +} + +static void test_bpf_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_hash_map *skel; + int err, i, len, map_fd, iter_fd; + union bpf_iter_link_info linfo; + __u64 val, expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + + skel = bpf_iter_bpf_hash_map__open(); + if 
(!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open")) + return; + + skel->bss->in_test_mode = true; + + err = bpf_iter_bpf_hash_map__load(skel); + if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load")) + goto out; + + /* iterator with hashmap2 and hashmap3 should fail */ + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_ERR_PTR(link, "attach_iter")) + goto out; + + linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_ERR_PTR(link, "attach_iter")) + goto out; + + /* hashmap1 should be good, update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { + key.a = i + 1; + key.b = i + 2; + key.c = i + 3; + val = i + 4; + expected_key_a += key.a; + expected_key_b += key.b; + expected_val += val; + + err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + /* Sleepable program is prohibited for hash map iterator */ + linfo.map.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts); + if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter")) + goto out; + + linfo.map.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_hash_map__destroy(skel); +} + +static void test_bpf_percpu_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_percpu_hash_map *skel; + int err, i, j, len, map_fd, iter_fd; + union bpf_iter_link_info linfo; + __u32 expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + void *val; + + skel = bpf_iter_bpf_percpu_hash_map__open(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open")) + return; + + skel->rodata->num_cpus = bpf_num_possible_cpus(); + val = malloc(8 * bpf_num_possible_cpus()); + + err = bpf_iter_bpf_percpu_hash_map__load(skel); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load")) + goto out; + + /* update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { + key.a = i + 1; + key.b = i + 2; + key.c = i + 3; + expected_key_a += key.a; + expected_key_b += key.b; + + for (j = 0; j < bpf_num_possible_cpus(); j++) { + *(__u32 *)(val + j * 8) = i + j; + expected_val += i + j; + } + + err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + 
opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_percpu_hash_map__destroy(skel); + free(val); +} + +static void test_bpf_array_map(void) +{ + __u64 val, expected_val = 0, res_first_val, first_val = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + __u32 expected_key = 0, res_first_key; + struct bpf_iter_bpf_array_map *skel; + union bpf_iter_link_info linfo; + int err, i, map_fd, iter_fd; + struct bpf_link *link; + char buf[64] = {}; + int len, start; + + skel = bpf_iter_bpf_array_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) + return; + + map_fd = bpf_map__fd(skel->maps.arraymap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + val = i + 4; + expected_key += i; + expected_val += val; + + if (i == 0) + first_val = val; + + err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + start = 0; + while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) + start += len; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + res_first_key = *(__u32 *)buf; + res_first_val = *(__u64 *)(buf + sizeof(__u32)); + if (CHECK(res_first_key != 0 || res_first_val != first_val, + "bpf_seq_write", + "seq_write failure: first key %u vs expected 0, " + " first value %llu vs expected %llu\n", + res_first_key, res_first_val, first_val)) + goto close_iter; + + if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) + goto close_iter; + + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + err = bpf_map_lookup_elem(map_fd, &i, &val); + if (!ASSERT_OK(err, "map_lookup")) + goto out; + if (!ASSERT_EQ(i, val, "invalid_val")) + goto out; + } + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_array_map__destroy(skel); +} + +static void test_bpf_array_map_iter_fd(void) +{ + struct bpf_iter_bpf_array_map *skel; + + skel = bpf_iter_bpf_array_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) + return; + + do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map, + skel->maps.arraymap1); + + bpf_iter_bpf_array_map__destroy(skel); +} + +static void 
test_bpf_percpu_array_map(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_percpu_array_map *skel; + __u32 expected_key = 0, expected_val = 0; + union bpf_iter_link_info linfo; + int err, i, j, map_fd, iter_fd; + struct bpf_link *link; + char buf[64]; + void *val; + int len; + + skel = bpf_iter_bpf_percpu_array_map__open(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open")) + return; + + skel->rodata->num_cpus = bpf_num_possible_cpus(); + val = malloc(8 * bpf_num_possible_cpus()); + + err = bpf_iter_bpf_percpu_array_map__load(skel); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load")) + goto out; + + /* update map values here */ + map_fd = bpf_map__fd(skel->maps.arraymap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + expected_key += i; + + for (j = 0; j < bpf_num_possible_cpus(); j++) { + *(__u32 *)(val + j * 8) = i + j; + expected_val += i + j; + } + + err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) + goto close_iter; + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_percpu_array_map__destroy(skel); + free(val); +} + +/* An iterator program deletes all local storage in a map. 
*/ +static void test_bpf_sk_storage_delete(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_sk_storage_helpers *skel; + union bpf_iter_link_info linfo; + int err, len, map_fd, iter_fd; + struct bpf_link *link; + int sock_fd = -1; + __u32 val = 42; + char buf[64]; + + skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) + return; + + map_fd = bpf_map__fd(skel->maps.sk_stg_map); + + sock_fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "socket")) + goto out; + err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); + if (!ASSERT_OK(err, "map_update")) + goto out; + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map, + &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); + if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", + "map value wasn't deleted (err=%d, errno=%d)\n", err, errno)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + if (sock_fd >= 0) + close(sock_fd); + bpf_iter_bpf_sk_storage_helpers__destroy(skel); +} + +/* This creates a socket and its local storage. It then runs a task_iter BPF + * program that replaces the existing socket local storage with the tgid of the + * only task owning a file descriptor to this socket (this process, prog_tests). + * It then runs a tcp socket iterator that negates the value in the existing + * socket local storage; the test verifies that the resulting value is -pid. 
+ */ +static void test_bpf_sk_storage_get(void) +{ + struct bpf_iter_bpf_sk_storage_helpers *skel; + int err, map_fd, val = -1; + int sock_fd = -1; + + skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) + return; + + sock_fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "socket")) + goto out; + + err = listen(sock_fd, 1); + if (!ASSERT_OK(err, "listen")) + goto close_socket; + + map_fd = bpf_map__fd(skel->maps.sk_stg_map); + + err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto close_socket; + + do_dummy_read(skel->progs.fill_socket_owner); + + err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); + if (CHECK(err || val != getpid(), "bpf_map_lookup_elem", + "map value wasn't set correctly (expected %d, got %d, err=%d)\n", + getpid(), val, err)) + goto close_socket; + + do_dummy_read(skel->progs.negate_socket_local_storage); + + err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); + CHECK(err || val != -getpid(), "bpf_map_lookup_elem", + "map value wasn't set correctly (expected %d, got %d, err=%d)\n", + -getpid(), val, err); + +close_socket: + close(sock_fd); +out: + bpf_iter_bpf_sk_storage_helpers__destroy(skel); +} + +static void test_bpf_sk_stoarge_map_iter_fd(void) +{ + struct bpf_iter_bpf_sk_storage_map *skel; + + skel = bpf_iter_bpf_sk_storage_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) + return; + + do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map, + skel->maps.sk_stg_map); + + bpf_iter_bpf_sk_storage_map__destroy(skel); +} + +static void test_bpf_sk_storage_map(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + int err, i, len, map_fd, iter_fd, num_sockets; + struct bpf_iter_bpf_sk_storage_map *skel; + union bpf_iter_link_info linfo; + int sock_fd[3] = {-1, -1, -1}; + __u32 val, expected_val = 0; + struct bpf_link *link; + char buf[64]; + + skel = bpf_iter_bpf_sk_storage_map__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) + return; + + map_fd = bpf_map__fd(skel->maps.sk_stg_map); + num_sockets = ARRAY_SIZE(sock_fd); + for (i = 0; i < num_sockets; i++) { + sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd[i], 0, "socket")) + goto out; + + val = i + 1; + expected_val += val; + + err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, + BPF_NOEXIST); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = map_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts); + err = libbpf_get_error(link); + if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) { + if (!err) + bpf_link__destroy(link); + goto out; + } + + link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + skel->bss->to_add_val = time(NULL); + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count")) + goto close_iter; + + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, 
"val_sum")) + goto close_iter; + + for (i = 0; i < num_sockets; i++) { + err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val); + if (!ASSERT_OK(err, "map_lookup") || + !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value")) + break; + } + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + for (i = 0; i < num_sockets; i++) { + if (sock_fd[i] >= 0) + close(sock_fd[i]); + } + bpf_iter_bpf_sk_storage_map__destroy(skel); +} + +static void test_rdonly_buf_out_of_bound(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_test_kern5 *skel; + union bpf_iter_link_info linfo; + struct bpf_link *link; + + skel = bpf_iter_test_kern5__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load")) + return; + + memset(&linfo, 0, sizeof(linfo)); + linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (!ASSERT_ERR_PTR(link, "attach_iter")) + bpf_link__destroy(link); + + bpf_iter_test_kern5__destroy(skel); +} + +static void test_buf_neg_offset(void) +{ + struct bpf_iter_test_kern6 *skel; + + skel = bpf_iter_test_kern6__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load")) + bpf_iter_test_kern6__destroy(skel); +} + +static void test_link_iter(void) +{ + struct bpf_iter_bpf_link *skel; + + skel = bpf_iter_bpf_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_bpf_link); + + bpf_iter_bpf_link__destroy(skel); +} + +static void test_ksym_iter(void) +{ + struct bpf_iter_ksym *skel; + + skel = bpf_iter_ksym__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_ksym); + + bpf_iter_ksym__destroy(skel); +} + +#define CMP_BUFFER_SIZE 1024 +static char task_vma_output[CMP_BUFFER_SIZE]; +static char proc_maps_output[CMP_BUFFER_SIZE]; + +/* remove \0 and \t from str, and only keep the first line */ +static void str_strip_first_line(char *str) +{ + char *dst = str, *src = str; + + do { + if (*src == ' ' || *src == '\t') + src++; + else + *(dst++) = *(src++); + + } while (*src != '\0' && *src != '\n'); + + *dst = '\0'; +} + +static void test_task_vma_common(struct bpf_iter_attach_opts *opts) +{ + int err, iter_fd = -1, proc_maps_fd = -1; + struct bpf_iter_task_vma *skel; + int len, read_size = 4; + char maps_path[64]; + + skel = bpf_iter_task_vma__open(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open")) + return; + + skel->bss->pid = getpid(); + skel->bss->one_task = opts ? 1 : 0; + + err = bpf_iter_task_vma__load(skel); + if (!ASSERT_OK(err, "bpf_iter_task_vma__load")) + goto out; + + skel->links.proc_maps = bpf_program__attach_iter( + skel->progs.proc_maps, opts); + + if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) { + skel->links.proc_maps = NULL; + goto out; + } + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto out; + + /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks + * to trigger seq_file corner cases. 
+ */ + len = 0; + while (len < CMP_BUFFER_SIZE) { + err = read_fd_into_buffer(iter_fd, task_vma_output + len, + MIN(read_size, CMP_BUFFER_SIZE - len)); + if (!err) + break; + if (!ASSERT_GE(err, 0, "read_iter_fd")) + goto out; + len += err; + } + if (opts) + ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task"); + + /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */ + snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid); + proc_maps_fd = open(maps_path, O_RDONLY); + if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps")) + goto out; + err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE); + if (!ASSERT_GE(err, 0, "read_prog_maps_fd")) + goto out; + + /* strip and compare the first line of the two files */ + str_strip_first_line(task_vma_output); + str_strip_first_line(proc_maps_output); + + ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output"); + + check_bpf_link_info(skel->progs.proc_maps); + +out: + close(proc_maps_fd); + close(iter_fd); + bpf_iter_task_vma__destroy(skel); +} + +void test_bpf_sockmap_map_iter_fd(void) +{ + struct bpf_iter_sockmap *skel; + + skel = bpf_iter_sockmap__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load")) + return; + + do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap); + + bpf_iter_sockmap__destroy(skel); +} + +static void test_task_vma(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.tid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + test_task_vma_common(&opts); + test_task_vma_common(NULL); +} + +/* uprobe attach point */ +static noinline int trigger_func(int arg) +{ + asm volatile (""); + return arg + 1; +} + +static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc) +{ + struct bpf_iter_vma_offset *skel; + char buf[16] = {}; + int iter_fd, len; + int pgsz, shift; + + skel = bpf_iter_vma_offset__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load")) + return; + + skel->bss->pid = getpid(); + skel->bss->address = (uintptr_t)trigger_func; + for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++) + ; + skel->bss->page_shift = shift; + + skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts); + if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter")) + goto exit; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset)); + if (!ASSERT_GT(iter_fd, 0, "create_iter")) + goto exit; + + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + buf[15] = 0; + ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp"); + + ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset"); + if (one_proc) + ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count"); + else + ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count"); + + close(iter_fd); + +exit: + bpf_iter_vma_offset__destroy(skel); +} + +static void test_task_vma_offset(void) +{ + LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + + memset(&linfo, 0, sizeof(linfo)); + linfo.task.pid = getpid(); + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + test_task_vma_offset_common(&opts, true); + + linfo.task.pid = 0; + linfo.task.tid = getpid(); + test_task_vma_offset_common(&opts, true); + + test_task_vma_offset_common(NULL, false); +} + +void test_bpf_iter(void) +{ + ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), 
"pthread_mutex_init"); + + if (test__start_subtest("btf_id_or_null")) + test_btf_id_or_null(); + if (test__start_subtest("ipv6_route")) + test_ipv6_route(); + if (test__start_subtest("netlink")) + test_netlink(); + if (test__start_subtest("bpf_map")) + test_bpf_map(); + if (test__start_subtest("task_tid")) + test_task_tid(); + if (test__start_subtest("task_pid")) + test_task_pid(); + if (test__start_subtest("task_pidfd")) + test_task_pidfd(); + if (test__start_subtest("task_sleepable")) + test_task_sleepable(); + if (test__start_subtest("task_stack")) + test_task_stack(); + if (test__start_subtest("task_file")) + test_task_file(); + if (test__start_subtest("task_vma")) + test_task_vma(); + if (test__start_subtest("task_btf")) + test_task_btf(); + if (test__start_subtest("tcp4")) + test_tcp4(); + if (test__start_subtest("tcp6")) + test_tcp6(); + if (test__start_subtest("udp4")) + test_udp4(); + if (test__start_subtest("udp6")) + test_udp6(); + if (test__start_subtest("unix")) + test_unix(); + if (test__start_subtest("anon")) + test_anon_iter(false); + if (test__start_subtest("anon-read-one-char")) + test_anon_iter(true); + if (test__start_subtest("file")) + test_file_iter(); + if (test__start_subtest("overflow")) + test_overflow(false, false); + if (test__start_subtest("overflow-e2big")) + test_overflow(true, false); + if (test__start_subtest("prog-ret-1")) + test_overflow(false, true); + if (test__start_subtest("bpf_hash_map")) + test_bpf_hash_map(); + if (test__start_subtest("bpf_percpu_hash_map")) + test_bpf_percpu_hash_map(); + if (test__start_subtest("bpf_array_map")) + test_bpf_array_map(); + if (test__start_subtest("bpf_array_map_iter_fd")) + test_bpf_array_map_iter_fd(); + if (test__start_subtest("bpf_percpu_array_map")) + test_bpf_percpu_array_map(); + if (test__start_subtest("bpf_sk_storage_map")) + test_bpf_sk_storage_map(); + if (test__start_subtest("bpf_sk_storage_map_iter_fd")) + test_bpf_sk_stoarge_map_iter_fd(); + if (test__start_subtest("bpf_sk_storage_delete")) + test_bpf_sk_storage_delete(); + if (test__start_subtest("bpf_sk_storage_get")) + test_bpf_sk_storage_get(); + if (test__start_subtest("rdonly-buf-out-of-bound")) + test_rdonly_buf_out_of_bound(); + if (test__start_subtest("buf-neg-offset")) + test_buf_neg_offset(); + if (test__start_subtest("link-iter")) + test_link_iter(); + if (test__start_subtest("ksym")) + test_ksym_iter(); + if (test__start_subtest("bpf_sockmap_map_iter_fd")) + test_bpf_sockmap_map_iter_fd(); + if (test__start_subtest("vma_offset")) + test_task_vma_offset(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c new file mode 100644 index 000000000..b52ff8ce3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <sched.h> +#include <test_progs.h> +#include "network_helpers.h" +#include "bpf_dctcp.skel.h" +#include "bpf_cubic.skel.h" +#include "bpf_iter_setsockopt.skel.h" + +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo")) + return -1; + + return 0; +} + +static unsigned int set_bpf_cubic(int *fds, unsigned int nr_fds) +{ + unsigned int i; + + for (i = 0; i < nr_fds; i++) { + if (setsockopt(fds[i], SOL_TCP, TCP_CONGESTION, "bpf_cubic", + sizeof("bpf_cubic"))) + return i; + } + + return nr_fds; 
+} + +static unsigned int check_bpf_dctcp(int *fds, unsigned int nr_fds) +{ + char tcp_cc[16]; + socklen_t optlen = sizeof(tcp_cc); + unsigned int i; + + for (i = 0; i < nr_fds; i++) { + if (getsockopt(fds[i], SOL_TCP, TCP_CONGESTION, + tcp_cc, &optlen) || + strcmp(tcp_cc, "bpf_dctcp")) + return i; + } + + return nr_fds; +} + +static int *make_established(int listen_fd, unsigned int nr_est, + int **paccepted_fds) +{ + int *est_fds, *accepted_fds; + unsigned int i; + + est_fds = malloc(sizeof(*est_fds) * nr_est); + if (!est_fds) + return NULL; + + accepted_fds = malloc(sizeof(*accepted_fds) * nr_est); + if (!accepted_fds) { + free(est_fds); + return NULL; + } + + for (i = 0; i < nr_est; i++) { + est_fds[i] = connect_to_fd(listen_fd, 0); + if (est_fds[i] == -1) + break; + if (set_bpf_cubic(&est_fds[i], 1) != 1) { + close(est_fds[i]); + break; + } + + accepted_fds[i] = accept(listen_fd, NULL, 0); + if (accepted_fds[i] == -1) { + close(est_fds[i]); + break; + } + } + + if (!ASSERT_EQ(i, nr_est, "create established fds")) { + free_fds(accepted_fds, i); + free_fds(est_fds, i); + return NULL; + } + + *paccepted_fds = accepted_fds; + return est_fds; +} + +static unsigned short get_local_port(int fd) +{ + struct sockaddr_in6 addr; + socklen_t addrlen = sizeof(addr); + + if (!getsockname(fd, &addr, &addrlen)) + return ntohs(addr.sin6_port); + + return 0; +} + +static void do_bpf_iter_setsockopt(struct bpf_iter_setsockopt *iter_skel, + bool random_retry) +{ + int *reuse_listen_fds = NULL, *accepted_fds = NULL, *est_fds = NULL; + unsigned int nr_reuse_listens = 256, nr_est = 256; + int err, iter_fd = -1, listen_fd = -1; + char buf; + + /* Prepare non-reuseport listen_fd */ + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (!ASSERT_GE(listen_fd, 0, "start_server")) + return; + if (!ASSERT_EQ(set_bpf_cubic(&listen_fd, 1), 1, + "set listen_fd to cubic")) + goto done; + iter_skel->bss->listen_hport = get_local_port(listen_fd); + if (!ASSERT_NEQ(iter_skel->bss->listen_hport, 0, + "get_local_port(listen_fd)")) + goto done; + + /* Connect to non-reuseport listen_fd */ + est_fds = make_established(listen_fd, nr_est, &accepted_fds); + if (!ASSERT_OK_PTR(est_fds, "create established")) + goto done; + + /* Prepare reuseport listen fds */ + reuse_listen_fds = start_reuseport_server(AF_INET6, SOCK_STREAM, + "::1", 0, 0, + nr_reuse_listens); + if (!ASSERT_OK_PTR(reuse_listen_fds, "start_reuseport_server")) + goto done; + if (!ASSERT_EQ(set_bpf_cubic(reuse_listen_fds, nr_reuse_listens), + nr_reuse_listens, "set reuse_listen_fds to cubic")) + goto done; + iter_skel->bss->reuse_listen_hport = get_local_port(reuse_listen_fds[0]); + if (!ASSERT_NEQ(iter_skel->bss->reuse_listen_hport, 0, + "get_local_port(reuse_listen_fds[0])")) + goto done; + + /* Run bpf tcp iter to switch from bpf_cubic to bpf_dctcp */ + iter_skel->bss->random_retry = random_retry; + iter_fd = bpf_iter_create(bpf_link__fd(iter_skel->links.change_tcp_cc)); + if (!ASSERT_GE(iter_fd, 0, "create iter_fd")) + goto done; + + while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 && + errno == EAGAIN) + ; + if (!ASSERT_OK(err, "read iter error")) + goto done; + + /* Check reuseport listen fds for dctcp */ + ASSERT_EQ(check_bpf_dctcp(reuse_listen_fds, nr_reuse_listens), + nr_reuse_listens, + "check reuse_listen_fds dctcp"); + + /* Check non reuseport listen fd for dctcp */ + ASSERT_EQ(check_bpf_dctcp(&listen_fd, 1), 1, + "check listen_fd dctcp"); + + /* Check established fds for dctcp */ + ASSERT_EQ(check_bpf_dctcp(est_fds, nr_est), nr_est, 
+ "check est_fds dctcp"); + + /* Check accepted fds for dctcp */ + ASSERT_EQ(check_bpf_dctcp(accepted_fds, nr_est), nr_est, + "check accepted_fds dctcp"); + +done: + if (iter_fd != -1) + close(iter_fd); + if (listen_fd != -1) + close(listen_fd); + free_fds(reuse_listen_fds, nr_reuse_listens); + free_fds(accepted_fds, nr_est); + free_fds(est_fds, nr_est); +} + +void serial_test_bpf_iter_setsockopt(void) +{ + struct bpf_iter_setsockopt *iter_skel = NULL; + struct bpf_cubic *cubic_skel = NULL; + struct bpf_dctcp *dctcp_skel = NULL; + struct bpf_link *cubic_link = NULL; + struct bpf_link *dctcp_link = NULL; + + if (create_netns()) + return; + + /* Load iter_skel */ + iter_skel = bpf_iter_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(iter_skel, "iter_skel")) + return; + iter_skel->links.change_tcp_cc = bpf_program__attach_iter(iter_skel->progs.change_tcp_cc, NULL); + if (!ASSERT_OK_PTR(iter_skel->links.change_tcp_cc, "attach iter")) + goto done; + + /* Load bpf_cubic */ + cubic_skel = bpf_cubic__open_and_load(); + if (!ASSERT_OK_PTR(cubic_skel, "cubic_skel")) + goto done; + cubic_link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); + if (!ASSERT_OK_PTR(cubic_link, "cubic_link")) + goto done; + + /* Load bpf_dctcp */ + dctcp_skel = bpf_dctcp__open_and_load(); + if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) + goto done; + dctcp_link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); + if (!ASSERT_OK_PTR(dctcp_link, "dctcp_link")) + goto done; + + do_bpf_iter_setsockopt(iter_skel, true); + do_bpf_iter_setsockopt(iter_skel, false); + +done: + bpf_link__destroy(cubic_link); + bpf_link__destroy(dctcp_link); + bpf_cubic__destroy(cubic_skel); + bpf_dctcp__destroy(dctcp_skel); + bpf_iter_setsockopt__destroy(iter_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c new file mode 100644 index 000000000..ee725d4d9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ +#include <sys/socket.h> +#include <sys/un.h> +#include <test_progs.h> +#include "bpf_iter_setsockopt_unix.skel.h" + +#define NR_CASES 5 + +static int create_unix_socket(struct bpf_iter_setsockopt_unix *skel) +{ + struct sockaddr_un addr = { + .sun_family = AF_UNIX, + .sun_path = "", + }; + socklen_t len; + int fd, err; + + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!ASSERT_NEQ(fd, -1, "socket")) + return -1; + + len = offsetof(struct sockaddr_un, sun_path); + err = bind(fd, (struct sockaddr *)&addr, len); + if (!ASSERT_OK(err, "bind")) + return -1; + + len = sizeof(addr); + err = getsockname(fd, (struct sockaddr *)&addr, &len); + if (!ASSERT_OK(err, "getsockname")) + return -1; + + memcpy(&skel->bss->sun_path, &addr.sun_path, + len - offsetof(struct sockaddr_un, sun_path)); + + return fd; +} + +static void test_sndbuf(struct bpf_iter_setsockopt_unix *skel, int fd) +{ + socklen_t optlen; + int i, err; + + for (i = 0; i < NR_CASES; i++) { + if (!ASSERT_NEQ(skel->data->sndbuf_getsockopt[i], -1, + "bpf_(get|set)sockopt")) + return; + + err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, + &(skel->data->sndbuf_setsockopt[i]), + sizeof(skel->data->sndbuf_setsockopt[i])); + if (!ASSERT_OK(err, "setsockopt")) + return; + + optlen = sizeof(skel->bss->sndbuf_getsockopt_expected[i]); + err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, + &(skel->bss->sndbuf_getsockopt_expected[i]), + &optlen); + if (!ASSERT_OK(err, "getsockopt")) + return; + + if (!ASSERT_EQ(skel->data->sndbuf_getsockopt[i], + skel->bss->sndbuf_getsockopt_expected[i], + "bpf_(get|set)sockopt")) + return; + } +} + +void test_bpf_iter_setsockopt_unix(void) +{ + struct bpf_iter_setsockopt_unix *skel; + int err, unix_fd, iter_fd; + char buf; + + skel = bpf_iter_setsockopt_unix__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + unix_fd = create_unix_socket(skel); + if (!ASSERT_NEQ(unix_fd, -1, "create_unix_server")) + goto destroy; + + skel->links.change_sndbuf = bpf_program__attach_iter(skel->progs.change_sndbuf, NULL); + if (!ASSERT_OK_PTR(skel->links.change_sndbuf, "bpf_program__attach_iter")) + goto destroy; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.change_sndbuf)); + if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create")) + goto destroy; + + while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 && + errno == EAGAIN) + ; + if (!ASSERT_OK(err, "read iter error")) + goto destroy; + + test_sndbuf(skel, unix_fd); +destroy: + bpf_iter_setsockopt_unix__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c new file mode 100644 index 000000000..4cd8a25af --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <network_helpers.h> +#include "bpf_loop.skel.h" + +static void check_nr_loops(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* test 0 loops */ + skel->bss->nr_loops = 0; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "0 loops"); + + /* test 500 loops */ + skel->bss->nr_loops = 500; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "500 loops"); + ASSERT_EQ(skel->bss->g_output, (500 * 499) / 2, "g_output"); + + /* test exceeding the max limit */ + skel->bss->nr_loops = -1; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -E2BIG, 
"over max limit"); + + bpf_link__destroy(link); +} + +static void check_callback_fn_stop(struct bpf_loop *skel) +{ + struct bpf_link *link; + + link = bpf_program__attach(skel->progs.test_prog); + if (!ASSERT_OK_PTR(link, "link")) + return; + + /* testing that loop is stopped when callback_fn returns 1 */ + skel->bss->nr_loops = 400; + skel->data->stop_index = 50; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->data->stop_index + 1, + "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (50 * 49) / 2, + "g_output"); + + bpf_link__destroy(link); +} + +static void check_null_callback_ctx(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that user is able to pass in a null callback_ctx */ + link = bpf_program__attach(skel->progs.prog_null_ctx); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = 10; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, skel->bss->nr_loops, + "nr_loops_returned"); + + bpf_link__destroy(link); +} + +static void check_invalid_flags(struct bpf_loop *skel) +{ + struct bpf_link *link; + + /* check that passing in non-zero flags returns -EINVAL */ + link = bpf_program__attach(skel->progs.prog_invalid_flags); + if (!ASSERT_OK_PTR(link, "link")) + return; + + usleep(1); + + ASSERT_EQ(skel->bss->err, -EINVAL, "err"); + + bpf_link__destroy(link); +} + +static void check_nested_calls(struct bpf_loop *skel) +{ + __u32 nr_loops = 100, nested_callback_nr_loops = 4; + struct bpf_link *link; + + /* check that nested calls are supported */ + link = bpf_program__attach(skel->progs.prog_nested_calls); + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->nr_loops = nr_loops; + skel->bss->nested_callback_nr_loops = nested_callback_nr_loops; + + usleep(1); + + ASSERT_EQ(skel->bss->nr_loops_returned, nr_loops * nested_callback_nr_loops + * nested_callback_nr_loops, "nr_loops_returned"); + ASSERT_EQ(skel->bss->g_output, (4 * 3) / 2 * nested_callback_nr_loops + * nr_loops, "g_output"); + + bpf_link__destroy(link); +} + +static void check_non_constant_callback(struct bpf_loop *skel) +{ + struct bpf_link *link = + bpf_program__attach(skel->progs.prog_non_constant_callback); + + if (!ASSERT_OK_PTR(link, "link")) + return; + + skel->bss->callback_selector = 0x0F; + usleep(1); + ASSERT_EQ(skel->bss->g_output, 0x0F, "g_output #1"); + + skel->bss->callback_selector = 0xF0; + usleep(1); + ASSERT_EQ(skel->bss->g_output, 0xF0, "g_output #2"); + + bpf_link__destroy(link); +} + +static void check_stack(struct bpf_loop *skel) +{ + struct bpf_link *link = bpf_program__attach(skel->progs.stack_check); + const int max_key = 12; + int key; + int map_fd; + + if (!ASSERT_OK_PTR(link, "link")) + return; + + map_fd = bpf_map__fd(skel->maps.map1); + + if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) + goto out; + + for (key = 1; key <= max_key; ++key) { + int val = key; + int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST); + + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto out; + } + + usleep(1); + + for (key = 1; key <= max_key; ++key) { + int val; + int err = bpf_map_lookup_elem(map_fd, &key, &val); + + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + goto out; + if (!ASSERT_EQ(val, key + 1, "bad value in the map")) + goto out; + } + +out: + bpf_link__destroy(link); +} + +void test_bpf_loop(void) +{ + struct bpf_loop *skel; + + skel = bpf_loop__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_loop__open_and_load")) + return; + + skel->bss->pid = getpid(); + + if (test__start_subtest("check_nr_loops")) + check_nr_loops(skel); + if 
(test__start_subtest("check_callback_fn_stop")) + check_callback_fn_stop(skel); + if (test__start_subtest("check_null_callback_ctx")) + check_null_callback_ctx(skel); + if (test__start_subtest("check_invalid_flags")) + check_invalid_flags(skel); + if (test__start_subtest("check_nested_calls")) + check_nested_calls(skel); + if (test__start_subtest("check_non_constant_callback")) + check_non_constant_callback(skel); + if (test__start_subtest("check_stack")) + check_stack(skel); + + bpf_loop__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c new file mode 100644 index 000000000..a4d0cc9d3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <unistd.h> +#include <pthread.h> +#include <sys/mman.h> +#include <stdatomic.h> +#include <test_progs.h> +#include <sys/syscall.h> +#include <linux/module.h> +#include <linux/userfaultfd.h> + +#include "ksym_race.skel.h" +#include "bpf_mod_race.skel.h" +#include "kfunc_call_race.skel.h" + +/* This test crafts a race between btf_try_get_module and do_init_module, and + * checks whether btf_try_get_module handles the invocation for a well-formed + * but uninitialized module correctly. Unless the module has completed its + * initcalls, the verifier should fail the program load and return ENXIO. + * + * userfaultfd is used to trigger a fault in an fmod_ret program, and make it + * sleep, then the BPF program is loaded and the return value from verifier is + * inspected. After this, the userfaultfd is closed so that the module loading + * thread makes forward progress, and fmod_ret injects an error so that the + * module load fails and it is freed. + * + * If the verifier succeeded in loading the supplied program, it will end up + * taking reference to freed module, and trigger a crash when the program fd + * is closed later. This is true for both kfuncs and ksyms. In both cases, + * the crash is triggered inside bpf_prog_free_deferred, when module reference + * is finally released. 
+ */ + +struct test_config { + const char *str_open; + void *(*bpf_open_and_load)(); + void (*bpf_destroy)(void *); +}; + +enum bpf_test_state { + _TS_INVALID, + TS_MODULE_LOAD, + TS_MODULE_LOAD_FAIL, +}; + +static _Atomic enum bpf_test_state state = _TS_INVALID; + +static int sys_finit_module(int fd, const char *param_values, int flags) +{ + return syscall(__NR_finit_module, fd, param_values, flags); +} + +static int sys_delete_module(const char *name, unsigned int flags) +{ + return syscall(__NR_delete_module, name, flags); +} + +static int load_module(const char *mod) +{ + int ret, fd; + + fd = open("bpf_testmod.ko", O_RDONLY); + if (fd < 0) + return fd; + + ret = sys_finit_module(fd, "", 0); + close(fd); + if (ret < 0) + return ret; + return 0; +} + +static void *load_module_thread(void *p) +{ + + if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail")) + atomic_store(&state, TS_MODULE_LOAD); + else + atomic_store(&state, TS_MODULE_LOAD_FAIL); + return p; +} + +static int sys_userfaultfd(int flags) +{ + return syscall(__NR_userfaultfd, flags); +} + +static int test_setup_uffd(void *fault_addr) +{ + struct uffdio_register uffd_register = {}; + struct uffdio_api uffd_api = {}; + int uffd; + + uffd = sys_userfaultfd(O_CLOEXEC); + if (uffd < 0) + return -errno; + + uffd_api.api = UFFD_API; + uffd_api.features = 0; + if (ioctl(uffd, UFFDIO_API, &uffd_api)) { + close(uffd); + return -1; + } + + uffd_register.range.start = (unsigned long)fault_addr; + uffd_register.range.len = 4096; + uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING; + if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) { + close(uffd); + return -1; + } + return uffd; +} + +static void test_bpf_mod_race_config(const struct test_config *config) +{ + void *fault_addr, *skel_fail; + struct bpf_mod_race *skel; + struct uffd_msg uffd_msg; + pthread_t load_mod_thrd; + _Atomic int *blockingp; + int uffd, ret; + + fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration")) + return; + + if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod")) + goto end_mmap; + + skel = bpf_mod_race__open(); + if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open")) + goto end_module; + + skel->rodata->bpf_mod_race_config.tgid = getpid(); + skel->rodata->bpf_mod_race_config.inject_error = -4242; + skel->rodata->bpf_mod_race_config.fault_addr = fault_addr; + if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load")) + goto end_destroy; + blockingp = (_Atomic int *)&skel->bss->bpf_blocking; + + if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach")) + goto end_destroy; + + uffd = test_setup_uffd(fault_addr); + if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address")) + goto end_destroy; + + if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL), + "load module thread")) + goto end_uffd; + + /* Now, we either fail loading module, or block in bpf prog, spin to find out */ + while (!atomic_load(&state) && !atomic_load(blockingp)) + ; + if (!ASSERT_EQ(state, _TS_INVALID, "module load should block")) + goto end_join; + if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) { + pthread_kill(load_mod_thrd, SIGKILL); + goto end_uffd; + } + + /* We might have set bpf_blocking to 1, but may have not blocked in + * bpf_copy_from_user. Read userfaultfd descriptor to verify that. 
+ */ + if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg), + "read uffd block event")) + goto end_join; + if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault")) + goto end_join; + + /* We know that load_mod_thrd is blocked in the fmod_ret program, the + * module state is still MODULE_STATE_COMING because mod->init hasn't + * returned. This is the time we try to load a program calling kfunc and + * check if we get ENXIO from verifier. + */ + skel_fail = config->bpf_open_and_load(); + ret = errno; + if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) { + /* Close uffd to unblock load_mod_thrd */ + close(uffd); + uffd = -1; + while (atomic_load(blockingp) != 2) + ; + ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); + config->bpf_destroy(skel_fail); + goto end_join; + + } + ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO"); + ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false"); + + close(uffd); + uffd = -1; +end_join: + pthread_join(load_mod_thrd, NULL); + if (uffd < 0) + ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success"); +end_uffd: + if (uffd >= 0) + close(uffd); +end_destroy: + bpf_mod_race__destroy(skel); + ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); +end_module: + sys_delete_module("bpf_testmod", 0); + ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod"); +end_mmap: + munmap(fault_addr, 4096); + atomic_store(&state, _TS_INVALID); +} + +static const struct test_config ksym_config = { + .str_open = "ksym_race__open_and_load", + .bpf_open_and_load = (void *)ksym_race__open_and_load, + .bpf_destroy = (void *)ksym_race__destroy, +}; + +static const struct test_config kfunc_config = { + .str_open = "kfunc_call_race__open_and_load", + .bpf_open_and_load = (void *)kfunc_call_race__open_and_load, + .bpf_destroy = (void *)kfunc_call_race__destroy, +}; + +void serial_test_bpf_mod_race(void) +{ + if (test__start_subtest("ksym (used_btfs UAF)")) + test_bpf_mod_race_config(&ksym_config); + if (test__start_subtest("kfunc (kfunc_btf_tab UAF)")) + test_bpf_mod_race_config(&kfunc_config); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c new file mode 100644 index 000000000..b2998896f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include "test_bpf_nf.skel.h" +#include "test_bpf_nf_fail.skel.h" + +static char log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} test_bpf_nf_fail_tests[] = { + { "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" }, + { "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" }, + { "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" }, + { "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" }, + { "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" }, + { "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" }, + { "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" }, + { 
"write_not_allowlisted_field", "no write support to nf_conn at off" }, +}; + +enum { + TEST_XDP, + TEST_TC_BPF, +}; + +#define TIMEOUT_MS 3000 +#define IPS_STATUS_MASK (IPS_CONFIRMED | IPS_SEEN_REPLY | \ + IPS_SRC_NAT_DONE | IPS_DST_NAT_DONE | \ + IPS_SRC_NAT | IPS_DST_NAT) + +static int connect_to_server(int srv_fd) +{ + int fd = -1; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(fd, 0, "socket")) + goto out; + + if (!ASSERT_EQ(connect_fd_to_fd(fd, srv_fd, TIMEOUT_MS), 0, "connect_fd_to_fd")) { + close(fd); + fd = -1; + } +out: + return fd; +} + +static void test_bpf_nf_ct(int mode) +{ + const char *iptables = "iptables -t raw %s PREROUTING -j CONNMARK --set-mark 42/0"; + int srv_fd = -1, client_fd = -1, srv_client_fd = -1; + struct sockaddr_in peer_addr = {}; + struct test_bpf_nf *skel; + int prog_fd, err; + socklen_t len; + u16 srv_port; + char cmd[64]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = test_bpf_nf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) + return; + + /* Enable connection tracking */ + snprintf(cmd, sizeof(cmd), iptables, "-A"); + if (!ASSERT_OK(system(cmd), "iptables")) + goto end; + + srv_port = (mode == TEST_XDP) ? 5005 : 5006; + srv_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", srv_port, TIMEOUT_MS); + if (!ASSERT_GE(srv_fd, 0, "start_server")) + goto end; + + client_fd = connect_to_server(srv_fd); + if (!ASSERT_GE(client_fd, 0, "connect_to_server")) + goto end; + + len = sizeof(peer_addr); + srv_client_fd = accept(srv_fd, (struct sockaddr *)&peer_addr, &len); + if (!ASSERT_GE(srv_client_fd, 0, "accept")) + goto end; + if (!ASSERT_EQ(len, sizeof(struct sockaddr_in), "sockaddr len")) + goto end; + + skel->bss->saddr = peer_addr.sin_addr.s_addr; + skel->bss->sport = peer_addr.sin_port; + skel->bss->daddr = peer_addr.sin_addr.s_addr; + skel->bss->dport = htons(srv_port); + + if (mode == TEST_XDP) + prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test); + else + prog_fd = bpf_program__fd(skel->progs.nf_skb_ct_test); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto end; + + ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); + ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); + ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); + ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); + ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); + ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id"); + ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup"); + ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple"); + ASSERT_EQ(skel->data->test_alloc_entry, 0, "Test for alloc new entry"); + ASSERT_EQ(skel->data->test_insert_entry, 0, "Test for insert new entry"); + ASSERT_EQ(skel->data->test_succ_lookup, 0, "Test for successful lookup"); + /* allow some tolerance for test_delta_timeout value to avoid races. 
*/ + ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update"); + ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update"); + ASSERT_EQ(skel->bss->test_insert_lookup_mark, 77, "Test for insert and lookup mark value"); + ASSERT_EQ(skel->bss->test_status, IPS_STATUS_MASK, "Test for ct status update "); + ASSERT_EQ(skel->data->test_exist_lookup, 0, "Test existing connection lookup"); + ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark"); + ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting"); + ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting"); +end: + if (client_fd != -1) + close(client_fd); + if (srv_client_fd != -1) + close(srv_client_fd); + if (srv_fd != -1) + close(srv_fd); + + snprintf(cmd, sizeof(cmd), iptables, "-D"); + system(cmd); + test_bpf_nf__destroy(skel); +} + +static void test_bpf_nf_ct_fail(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct test_bpf_nf_fail *skel; + struct bpf_program *prog; + int ret; + + skel = test_bpf_nf_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "test_bpf_nf_fail__open")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = test_bpf_nf_fail__load(skel); + if (!ASSERT_ERR(ret, "test_bpf_nf_fail__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + test_bpf_nf_fail__destroy(skel); +} + +void test_bpf_nf(void) +{ + int i; + if (test__start_subtest("xdp-ct")) + test_bpf_nf_ct(TEST_XDP); + if (test__start_subtest("tc-bpf-ct")) + test_bpf_nf_ct(TEST_TC_BPF); + for (i = 0; i < ARRAY_SIZE(test_bpf_nf_fail_tests); i++) { + if (test__start_subtest(test_bpf_nf_fail_tests[i].prog_name)) + test_bpf_nf_ct_fail(test_bpf_nf_fail_tests[i].prog_name, + test_bpf_nf_fail_tests[i].err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c new file mode 100644 index 000000000..e1c1e521c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define nr_iters 2 + +void serial_test_bpf_obj_id(void) +{ + const __u64 array_magic_value = 0xfaceb00c; + const __u32 array_key = 0; + const char *file = "./test_obj_id.bpf.o"; + const char *expected_prog_name = "test_obj_id"; + const char *expected_map_name = "test_map_id"; + const __u64 nsec_per_sec = 1000000000; + + struct bpf_object *objs[nr_iters] = {}; + struct bpf_link *links[nr_iters] = {}; + struct bpf_program *prog; + int prog_fds[nr_iters], map_fds[nr_iters]; + /* +1 to test for the info_len returned by kernel */ + struct bpf_prog_info prog_infos[nr_iters + 1]; + struct bpf_map_info map_infos[nr_iters + 1]; + struct bpf_link_info link_infos[nr_iters + 1]; + /* Each prog only uses one map. +1 to test nr_map_ids + * returned by kernel. 
+ */ + __u32 map_ids[nr_iters + 1]; + char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128]; + __u32 i, next_id, info_len, nr_id_found, duration = 0; + struct timespec real_time_ts, boot_time_ts; + int err = 0; + __u64 array_value; + uid_t my_uid = getuid(); + time_t now, load_time; + + err = bpf_prog_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); + + err = bpf_map_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno); + + err = bpf_link_get_fd_by_id(0); + CHECK(err >= 0 || errno != ENOENT, + "get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno); + + /* Check bpf_obj_get_info_by_fd() */ + bzero(zeros, sizeof(zeros)); + for (i = 0; i < nr_iters; i++) { + now = time(NULL); + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, + &objs[i], &prog_fds[i]); + /* test_obj_id.o is a dumb prog. It should never fail + * to load. + */ + if (CHECK_FAIL(err)) + continue; + + /* Insert a magic value to the map */ + map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); + if (CHECK_FAIL(map_fds[i] < 0)) + goto done; + err = bpf_map_update_elem(map_fds[i], &array_key, + &array_magic_value, 0); + if (CHECK_FAIL(err)) + goto done; + + prog = bpf_object__find_program_by_name(objs[i], + "test_obj_id"); + if (CHECK_FAIL(!prog)) + goto done; + links[i] = bpf_program__attach(prog); + err = libbpf_get_error(links[i]); + if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) { + links[i] = NULL; + goto done; + } + + /* Check getting map info */ + info_len = sizeof(struct bpf_map_info) * 2; + bzero(&map_infos[i], info_len); + err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i], + &info_len); + if (CHECK(err || + map_infos[i].type != BPF_MAP_TYPE_ARRAY || + map_infos[i].key_size != sizeof(__u32) || + map_infos[i].value_size != sizeof(__u64) || + map_infos[i].max_entries != 1 || + map_infos[i].map_flags != 0 || + info_len != sizeof(struct bpf_map_info) || + strcmp((char *)map_infos[i].name, expected_map_name), + "get-map-info(fd)", + "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", + err, errno, + map_infos[i].type, BPF_MAP_TYPE_ARRAY, + info_len, sizeof(struct bpf_map_info), + map_infos[i].key_size, + map_infos[i].value_size, + map_infos[i].max_entries, + map_infos[i].map_flags, + map_infos[i].name, expected_map_name)) + goto done; + + /* Check getting prog info */ + info_len = sizeof(struct bpf_prog_info) * 2; + bzero(&prog_infos[i], info_len); + bzero(jited_insns, sizeof(jited_insns)); + bzero(xlated_insns, sizeof(xlated_insns)); + prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); + prog_infos[i].jited_prog_len = sizeof(jited_insns); + prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); + prog_infos[i].xlated_prog_len = sizeof(xlated_insns); + prog_infos[i].map_ids = ptr_to_u64(map_ids + i); + prog_infos[i].nr_map_ids = 2; + err = clock_gettime(CLOCK_REALTIME, &real_time_ts); + if (CHECK_FAIL(err)) + goto done; + err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); + if (CHECK_FAIL(err)) + goto done; + err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], + &info_len); + load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + + (prog_infos[i].load_time / nsec_per_sec); + if (CHECK(err || + prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT || + info_len != sizeof(struct bpf_prog_info) || + (env.jit_enabled && !prog_infos[i].jited_prog_len) || + 
(env.jit_enabled && + !memcmp(jited_insns, zeros, sizeof(zeros))) || + !prog_infos[i].xlated_prog_len || + !memcmp(xlated_insns, zeros, sizeof(zeros)) || + load_time < now - 60 || load_time > now + 60 || + prog_infos[i].created_by_uid != my_uid || + prog_infos[i].nr_map_ids != 1 || + *(int *)(long)prog_infos[i].map_ids != map_infos[i].id || + strcmp((char *)prog_infos[i].name, expected_prog_name), + "get-prog-info(fd)", + "err %d errno %d i %d type %d(%d) info_len %u(%zu) " + "jit_enabled %d jited_prog_len %u xlated_prog_len %u " + "jited_prog %d xlated_prog %d load_time %lu(%lu) " + "uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) " + "name %s(%s)\n", + err, errno, i, + prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, + info_len, sizeof(struct bpf_prog_info), + env.jit_enabled, + prog_infos[i].jited_prog_len, + prog_infos[i].xlated_prog_len, + !!memcmp(jited_insns, zeros, sizeof(zeros)), + !!memcmp(xlated_insns, zeros, sizeof(zeros)), + load_time, now, + prog_infos[i].created_by_uid, my_uid, + prog_infos[i].nr_map_ids, 1, + *(int *)(long)prog_infos[i].map_ids, map_infos[i].id, + prog_infos[i].name, expected_prog_name)) + goto done; + + /* Check getting link info */ + info_len = sizeof(struct bpf_link_info) * 2; + bzero(&link_infos[i], info_len); + link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name); + link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); + err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]), + &link_infos[i], &info_len); + if (CHECK(err || + link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || + link_infos[i].prog_id != prog_infos[i].id || + link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) || + strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), + "sys_enter") || + info_len != sizeof(struct bpf_link_info), + "get-link-info(fd)", + "err %d errno %d info_len %u(%zu) type %d(%d) id %d " + "prog_id %d (%d) tp_name %s(%s)\n", + err, errno, + info_len, sizeof(struct bpf_link_info), + link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, + link_infos[i].id, + link_infos[i].prog_id, prog_infos[i].id, + (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), + "sys_enter")) + goto done; + + } + + /* Check bpf_prog_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_prog_get_next_id(next_id, &next_id)) { + struct bpf_prog_info prog_info = {}; + __u32 saved_map_id; + int prog_fd; + + info_len = sizeof(prog_info); + + prog_fd = bpf_prog_get_fd_by_id(next_id); + if (prog_fd < 0 && errno == ENOENT) + /* The bpf_prog is in the dead row */ + continue; + if (CHECK(prog_fd < 0, "get-prog-fd(next_id)", + "prog_fd %d next_id %d errno %d\n", + prog_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (prog_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + /* Negative test: + * prog_info.nr_map_ids = 1 + * prog_info.map_ids = NULL + */ + prog_info.nr_map_ids = 1; + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + if (CHECK(!err || errno != EFAULT, + "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)", + err, errno, EFAULT)) + break; + bzero(&prog_info, sizeof(prog_info)); + info_len = sizeof(prog_info); + + saved_map_id = *(int *)((long)prog_infos[i].map_ids); + prog_info.map_ids = prog_infos[i].map_ids; + prog_info.nr_map_ids = 2; + err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); + prog_infos[i].jited_prog_insns = 0; + prog_infos[i].xlated_prog_insns = 0; + CHECK(err || info_len != sizeof(struct bpf_prog_info) || + memcmp(&prog_info, &prog_infos[i], info_len) || + 
*(int *)(long)prog_info.map_ids != saved_map_id, + "get-prog-info(next_id->fd)", + "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n", + err, errno, info_len, sizeof(struct bpf_prog_info), + memcmp(&prog_info, &prog_infos[i], info_len), + *(int *)(long)prog_info.map_ids, saved_map_id); + close(prog_fd); + } + CHECK(nr_id_found != nr_iters, + "check total prog id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + + /* Check bpf_map_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_map_get_next_id(next_id, &next_id)) { + struct bpf_map_info map_info = {}; + int map_fd; + + info_len = sizeof(map_info); + + map_fd = bpf_map_get_fd_by_id(next_id); + if (map_fd < 0 && errno == ENOENT) + /* The bpf_map is in the dead row */ + continue; + if (CHECK(map_fd < 0, "get-map-fd(next_id)", + "map_fd %d next_id %u errno %d\n", + map_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (map_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); + if (CHECK_FAIL(err)) + goto done; + + err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + CHECK(err || info_len != sizeof(struct bpf_map_info) || + memcmp(&map_info, &map_infos[i], info_len) || + array_value != array_magic_value, + "check get-map-info(next_id->fd)", + "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n", + err, errno, info_len, sizeof(struct bpf_map_info), + memcmp(&map_info, &map_infos[i], info_len), + array_value, array_magic_value); + + close(map_fd); + } + CHECK(nr_id_found != nr_iters, + "check total map id found by get_next_id", + "nr_id_found %u(%u)\n", + nr_id_found, nr_iters); + + /* Check bpf_link_get_next_id() */ + nr_id_found = 0; + next_id = 0; + while (!bpf_link_get_next_id(next_id, &next_id)) { + struct bpf_link_info link_info; + int link_fd, cmp_res; + + info_len = sizeof(link_info); + memset(&link_info, 0, info_len); + + link_fd = bpf_link_get_fd_by_id(next_id); + if (link_fd < 0 && errno == ENOENT) + /* The bpf_link is in the dead row */ + continue; + if (CHECK(link_fd < 0, "get-link-fd(next_id)", + "link_fd %d next_id %u errno %d\n", + link_fd, next_id, errno)) + break; + + for (i = 0; i < nr_iters; i++) + if (link_infos[i].id == next_id) + break; + + if (i == nr_iters) + continue; + + nr_id_found++; + + err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len); + cmp_res = memcmp(&link_info, &link_infos[i], + offsetof(struct bpf_link_info, raw_tracepoint)); + CHECK(err || info_len != sizeof(link_info) || cmp_res, + "check get-link-info(next_id->fd)", + "err %d errno %d info_len %u(%zu) memcmp %d\n", + err, errno, info_len, sizeof(struct bpf_link_info), + cmp_res); + + close(link_fd); + } + CHECK(nr_id_found != nr_iters, + "check total link id found by get_next_id", + "nr_id_found %u(%u)\n", nr_id_found, nr_iters); + +done: + for (i = 0; i < nr_iters; i++) { + bpf_link__destroy(links[i]); + bpf_object__close(objs[i]); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c new file mode 100644 index 000000000..e980188d4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <linux/err.h> +#include <netinet/tcp.h> +#include <test_progs.h> +#include "network_helpers.h" +#include "bpf_dctcp.skel.h" +#include "bpf_cubic.skel.h" +#include 
"bpf_tcp_nogpl.skel.h" +#include "bpf_dctcp_release.skel.h" +#include "tcp_ca_write_sk_pacing.skel.h" +#include "tcp_ca_incompl_cong_ops.skel.h" +#include "tcp_ca_unsupp_cong_op.skel.h" + +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + +static const unsigned int total_bytes = 10 * 1024 * 1024; +static int expected_stg = 0xeB9F; +static int stop, duration; + +static int settcpca(int fd, const char *tcp_ca) +{ + int err; + + err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca)); + if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n", + errno)) + return -1; + + return 0; +} + +static void *server(void *arg) +{ + int lfd = (int)(long)arg, err = 0, fd; + ssize_t nr_sent = 0, bytes = 0; + char batch[1500]; + + fd = accept(lfd, NULL, NULL); + while (fd == -1) { + if (errno == EINTR) + continue; + err = -errno; + goto done; + } + + if (settimeo(fd, 0)) { + err = -errno; + goto done; + } + + while (bytes < total_bytes && !READ_ONCE(stop)) { + nr_sent = send(fd, &batch, + MIN(total_bytes - bytes, sizeof(batch)), 0); + if (nr_sent == -1 && errno == EINTR) + continue; + if (nr_sent == -1) { + err = -errno; + break; + } + bytes += nr_sent; + } + + CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n", + bytes, total_bytes, nr_sent, errno); + +done: + if (fd >= 0) + close(fd); + if (err) { + WRITE_ONCE(stop, 1); + return ERR_PTR(err); + } + return NULL; +} + +static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) +{ + struct sockaddr_in6 sa6 = {}; + ssize_t nr_recv = 0, bytes = 0; + int lfd = -1, fd = -1; + pthread_t srv_thread; + socklen_t addrlen = sizeof(sa6); + void *thread_ret; + char batch[1500]; + int err; + + WRITE_ONCE(stop, 0); + + lfd = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK(lfd == -1, "socket", "errno:%d\n", errno)) + return; + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) { + close(lfd); + return; + } + + if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) || + settimeo(lfd, 0) || settimeo(fd, 0)) + goto done; + + /* bind, listen and start server thread to accept */ + sa6.sin6_family = AF_INET6; + sa6.sin6_addr = in6addr_loopback; + err = bind(lfd, (struct sockaddr *)&sa6, addrlen); + if (CHECK(err == -1, "bind", "errno:%d\n", errno)) + goto done; + err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen); + if (CHECK(err == -1, "getsockname", "errno:%d\n", errno)) + goto done; + err = listen(lfd, 1); + if (CHECK(err == -1, "listen", "errno:%d\n", errno)) + goto done; + + if (sk_stg_map) { + err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd, + &expected_stg, BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem(sk_stg_map)", + "err:%d errno:%d\n", err, errno)) + goto done; + } + + /* connect to server */ + err = connect(fd, (struct sockaddr *)&sa6, addrlen); + if (CHECK(err == -1, "connect", "errno:%d\n", errno)) + goto done; + + if (sk_stg_map) { + int tmp_stg; + + err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd, + &tmp_stg); + if (CHECK(!err || errno != ENOENT, + "bpf_map_lookup_elem(sk_stg_map)", + "err:%d errno:%d\n", err, errno)) + goto done; + } + + err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd); + if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno)) + goto done; + + /* recv total_bytes */ + while (bytes < total_bytes && !READ_ONCE(stop)) { + nr_recv = recv(fd, &batch, + MIN(total_bytes - bytes, sizeof(batch)), 0); + if (nr_recv == -1 && errno == EINTR) + continue; + if (nr_recv == -1) + break; + bytes += 
nr_recv; + } + + CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n", + bytes, total_bytes, nr_recv, errno); + + WRITE_ONCE(stop, 1); + pthread_join(srv_thread, &thread_ret); + CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld", + PTR_ERR(thread_ret)); +done: + close(lfd); + close(fd); +} + +static void test_cubic(void) +{ + struct bpf_cubic *cubic_skel; + struct bpf_link *link; + + cubic_skel = bpf_cubic__open_and_load(); + if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n")) + return; + + link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic); + if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) { + bpf_cubic__destroy(cubic_skel); + return; + } + + do_test("bpf_cubic", NULL); + + bpf_link__destroy(link); + bpf_cubic__destroy(cubic_skel); +} + +static void test_dctcp(void) +{ + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link; + + dctcp_skel = bpf_dctcp__open_and_load(); + if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n")) + return; + + link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); + if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) { + bpf_dctcp__destroy(dctcp_skel); + return; + } + + do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map); + CHECK(dctcp_skel->bss->stg_result != expected_stg, + "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n", + dctcp_skel->bss->stg_result, expected_stg); + + bpf_link__destroy(link); + bpf_dctcp__destroy(dctcp_skel); +} + +static char *err_str; +static bool found; + +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + const char *prog_name, *log_buf; + + if (level != LIBBPF_WARN || + !strstr(format, "-- BEGIN PROG LOAD LOG --")) { + vprintf(format, args); + return 0; + } + + prog_name = va_arg(args, char *); + log_buf = va_arg(args, char *); + if (!log_buf) + goto out; + if (err_str && strstr(log_buf, err_str) != NULL) + found = true; +out: + printf(format, prog_name, log_buf); + return 0; +} + +static void test_invalid_license(void) +{ + libbpf_print_fn_t old_print_fn; + struct bpf_tcp_nogpl *skel; + + err_str = "struct ops programs must have a GPL compatible license"; + found = false; + old_print_fn = libbpf_set_print(libbpf_debug_print); + + skel = bpf_tcp_nogpl__open_and_load(); + ASSERT_NULL(skel, "bpf_tcp_nogpl"); + ASSERT_EQ(found, true, "expected_err_msg"); + + bpf_tcp_nogpl__destroy(skel); + libbpf_set_print(old_print_fn); +} + +static void test_dctcp_fallback(void) +{ + int err, lfd = -1, cli_fd = -1, srv_fd = -1; + struct network_helper_opts opts = { + .cc = "cubic", + }; + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link = NULL; + char srv_cc[16]; + socklen_t cc_len = sizeof(srv_cc); + + dctcp_skel = bpf_dctcp__open(); + if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel")) + return; + strcpy(dctcp_skel->rodata->fallback, "cubic"); + if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load")) + goto done; + + link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp); + if (!ASSERT_OK_PTR(link, "dctcp link")) + goto done; + + lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (!ASSERT_GE(lfd, 0, "lfd") || + !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp")) + goto done; + + cli_fd = connect_to_fd_opts(lfd, &opts); + if (!ASSERT_GE(cli_fd, 0, "cli_fd")) + goto done; + + srv_fd = accept(lfd, NULL, 0); + if (!ASSERT_GE(srv_fd, 0, "srv_fd")) + goto done; + ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res"); + ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res"); + /* 
All setsockopt(TCP_CONGESTION) in the recurred + * bpf_dctcp->init() should fail with -EBUSY. + */ + ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt"); + + err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len); + if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)")) + goto done; + ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc"); + +done: + bpf_link__destroy(link); + bpf_dctcp__destroy(dctcp_skel); + if (lfd != -1) + close(lfd); + if (srv_fd != -1) + close(srv_fd); + if (cli_fd != -1) + close(cli_fd); +} + +static void test_rel_setsockopt(void) +{ + struct bpf_dctcp_release *rel_skel; + libbpf_print_fn_t old_print_fn; + + err_str = "unknown func bpf_setsockopt"; + found = false; + + old_print_fn = libbpf_set_print(libbpf_debug_print); + rel_skel = bpf_dctcp_release__open_and_load(); + libbpf_set_print(old_print_fn); + + ASSERT_ERR_PTR(rel_skel, "rel_skel"); + ASSERT_TRUE(found, "expected_err_msg"); + + bpf_dctcp_release__destroy(rel_skel); +} + +static void test_write_sk_pacing(void) +{ + struct tcp_ca_write_sk_pacing *skel; + struct bpf_link *link; + + skel = tcp_ca_write_sk_pacing__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing); + ASSERT_OK_PTR(link, "attach_struct_ops"); + + bpf_link__destroy(link); + tcp_ca_write_sk_pacing__destroy(skel); +} + +static void test_incompl_cong_ops(void) +{ + struct tcp_ca_incompl_cong_ops *skel; + struct bpf_link *link; + + skel = tcp_ca_incompl_cong_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + /* That cong_avoid() and cong_control() are missing is only reported at + * this point: + */ + link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops); + ASSERT_ERR_PTR(link, "attach_struct_ops"); + + bpf_link__destroy(link); + tcp_ca_incompl_cong_ops__destroy(skel); +} + +static void test_unsupp_cong_op(void) +{ + libbpf_print_fn_t old_print_fn; + struct tcp_ca_unsupp_cong_op *skel; + + err_str = "attach to unsupported member get_info"; + found = false; + old_print_fn = libbpf_set_print(libbpf_debug_print); + + skel = tcp_ca_unsupp_cong_op__open_and_load(); + ASSERT_NULL(skel, "open_and_load"); + ASSERT_EQ(found, true, "expected_err_msg"); + + tcp_ca_unsupp_cong_op__destroy(skel); + libbpf_set_print(old_print_fn); +} + +void test_bpf_tcp_ca(void) +{ + if (test__start_subtest("dctcp")) + test_dctcp(); + if (test__start_subtest("cubic")) + test_cubic(); + if (test__start_subtest("invalid_license")) + test_invalid_license(); + if (test__start_subtest("dctcp_fallback")) + test_dctcp_fallback(); + if (test__start_subtest("rel_setsockopt")) + test_rel_setsockopt(); + if (test__start_subtest("write_sk_pacing")) + test_write_sk_pacing(); + if (test__start_subtest("incompl_cong_ops")) + test_incompl_cong_ops(); + if (test__start_subtest("unsupp_cong_op")) + test_unsupp_cong_op(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c new file mode 100644 index 000000000..5ca252823 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook +#include <test_progs.h> +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level != LIBBPF_DEBUG) { + vprintf(format, args); + return 0; + } + + if (!strstr(format, "verifier log")) + return 0; + vprintf("%s", args); + return 0; +} + +extern int 
extra_prog_load_log_flags; + +static int check_load(const char *file, enum bpf_prog_type type) +{ + struct bpf_object *obj = NULL; + struct bpf_program *prog; + int err; + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_type(prog, type); + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: + bpf_object__close(obj); + return err; +} + +struct scale_test_def { + const char *file; + enum bpf_prog_type attach_type; + bool fails; +}; + +static void scale_test(const char *file, + enum bpf_prog_type attach_type, + bool should_fail) +{ + libbpf_print_fn_t old_print_fn = NULL; + int err; + + if (env.verifier_stats) { + test__force_log(); + old_print_fn = libbpf_set_print(libbpf_debug_print); + } + + err = check_load(file, attach_type); + if (should_fail) + ASSERT_ERR(err, "expect_error"); + else + ASSERT_OK(err, "expect_success"); + + if (env.verifier_stats) + libbpf_set_print(old_print_fn); +} + +void test_verif_scale1() +{ + scale_test("test_verif_scale1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale2() +{ + scale_test("test_verif_scale2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale3() +{ + scale_test("test_verif_scale3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_pyperf_global() +{ + scale_test("pyperf_global.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf_subprogs() +{ + scale_test("pyperf_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf50() +{ + /* full unroll by llvm */ + scale_test("pyperf50.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf100() +{ + /* full unroll by llvm */ + scale_test("pyperf100.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf180() +{ + /* full unroll by llvm */ + scale_test("pyperf180.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600() +{ + /* partial unroll. llvm will unroll loop ~150 times. + * C loop count -> 600. + * Asm loop count -> 4. + * 16k insns in loop body. + * Total of 5 such loops. Total program size ~82k insns. + */ + scale_test("pyperf600.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600_bpf_loop(void) +{ + /* use the bpf_loop helper*/ + scale_test("pyperf600_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_pyperf600_nounroll() +{ + /* no unroll at all. + * C loop count -> 600. + * ASM loop count -> 600. + * ~110 insns in loop body. + * Total of 5 such loops. Total program size ~1500 insns. 
+ */ + scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop1() +{ + scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop2() +{ + scale_test("loop2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_loop3_fail() +{ + scale_test("loop3.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */); +} + +void test_verif_scale_loop4() +{ + scale_test("loop4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop5() +{ + scale_test("loop5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, false); +} + +void test_verif_scale_loop6() +{ + scale_test("loop6.bpf.o", BPF_PROG_TYPE_KPROBE, false); +} + +void test_verif_scale_strobemeta() +{ + /* partial unroll. 19k insn in a loop. + * Total program size 20.8k insn. + * ~350k processed_insns + */ + scale_test("strobemeta.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_bpf_loop(void) +{ + /* use the bpf_loop helper*/ + scale_test("strobemeta_bpf_loop.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll1() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_nounroll2() +{ + /* no unroll, tiny loops */ + scale_test("strobemeta_nounroll2.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_strobemeta_subprogs() +{ + /* non-inlined subprogs */ + scale_test("strobemeta_subprogs.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + +void test_verif_scale_sysctl_loop1() +{ + scale_test("test_sysctl_loop1.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_sysctl_loop2() +{ + scale_test("test_sysctl_loop2.bpf.o", BPF_PROG_TYPE_CGROUP_SYSCTL, false); +} + +void test_verif_scale_xdp_loop() +{ + scale_test("test_xdp_loop.bpf.o", BPF_PROG_TYPE_XDP, false); +} + +void test_verif_scale_seg6_loop() +{ + scale_test("test_seg6_loop.bpf.o", BPF_PROG_TYPE_LWT_SEG6LOCAL, false); +} + +void test_verif_twfw() +{ + scale_test("twfw.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c new file mode 100644 index 000000000..d711f4bea --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -0,0 +1,7873 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Facebook */ + +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/filter.h> +#include <linux/unistd.h> +#include <bpf/bpf.h> +#include <libelf.h> +#include <gelf.h> +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#include <unistd.h> +#include <fcntl.h> +#include <errno.h> +#include <assert.h> +#include <bpf/libbpf.h> +#include <bpf/btf.h> + +#include "bpf_util.h" +#include "../test_btf.h" +#include "test_progs.h" + +#define MAX_INSNS 512 +#define MAX_SUBPROGS 16 + +static int duration = 0; +static bool always_log; + +#undef CHECK +#define CHECK(condition, format...) 
_CHECK(condition, "check", duration, format) + +#define NAME_TBD 0xdeadb33f + +#define NAME_NTH(N) (0xfffe0000 | N) +#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000) +#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff) + +#define MAX_NR_RAW_U32 1024 +#define BTF_LOG_BUF_SIZE 65535 + +static char btf_log_buf[BTF_LOG_BUF_SIZE]; + +static struct btf_header hdr_tmpl = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), +}; + +/* several different mapv kinds(types) supported by pprint */ +enum pprint_mapv_kind_t { + PPRINT_MAPV_KIND_BASIC = 0, + PPRINT_MAPV_KIND_INT128, +}; + +struct btf_raw_test { + const char *descr; + const char *str_sec; + const char *map_name; + const char *err_str; + __u32 raw_types[MAX_NR_RAW_U32]; + __u32 str_sec_size; + enum bpf_map_type map_type; + __u32 key_size; + __u32 value_size; + __u32 key_type_id; + __u32 value_type_id; + __u32 max_entries; + bool btf_load_err; + bool map_create_err; + bool ordered_map; + bool lossless_map; + bool percpu_map; + int hdr_len_delta; + int type_off_delta; + int str_off_delta; + int str_len_delta; + enum pprint_mapv_kind_t mapv_kind; +}; + +#define BTF_STR_SEC(str) \ + .str_sec = str, .str_sec_size = sizeof(str) + +static struct btf_raw_test raw_tests[] = { +/* enum E { + * E0, + * E1, + * }; + * + * struct A { + * unsigned long long m; + * int n; + * char o; + * [3 bytes hole] + * int p[8]; + * int q[4][8]; + * enum E r; + * }; + */ +{ + .descr = "struct test #1", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 6), 180), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + BTF_MEMBER_ENC(NAME_TBD, 6, 384),/* int q[4][8] */ + BTF_MEMBER_ENC(NAME_TBD, 7, 1408), /* enum E r */ + /* } */ + /* int[4][8] */ + BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [6] */ + /* enum E */ /* [7] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), sizeof(int)), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0q\0r\0E\0E0\0E1", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0q\0r\0E\0E0\0E1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_test1_map", + .key_size = sizeof(int), + .value_size = 180, + .key_type_id = 1, + .value_type_id = 5, + .max_entries = 4, +}, + +/* typedef struct b Struct_B; + * + * struct A { + * int m; + * struct b n[4]; + * const Struct_B o[4]; + * }; + * + * struct B { + * int m; + * int n; + * }; + */ +{ + .descr = "struct test #2", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* struct b [4] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(4, 1, 4), + + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 3), 68), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct B n[4] */ + BTF_MEMBER_ENC(NAME_TBD, 8, 288),/* const Struct_B o[4];*/ + /* } */ + + /* struct B { */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 1, 32),/* int n; */ + /* } */ + + /* const 
int */ /* [5] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), + /* typedef struct b Struct_B */ /* [6] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), 4), + /* const Struct_B */ /* [7] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 6), + /* const Struct_B [4] */ /* [8] */ + BTF_TYPE_ARRAY_ENC(7, 1, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0B\0m\0n\0Struct_B", + .str_sec_size = sizeof("\0A\0m\0n\0o\0B\0m\0n\0Struct_B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_test2_map", + .key_size = sizeof(int), + .value_size = 68, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, +}, +{ + .descr = "struct test #3 Invalid member offset", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int64 */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), + + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16), + BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0", + .str_sec_size = sizeof("\0A\0m\0n\0"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_test3_map", + .key_size = sizeof(int), + .value_size = 16, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member bits_offset", +}, +/* + * struct A { + * unsigned long long m; + * int n; + * char o; + * [3 bytes hole] + * int p[8]; + * }; + */ +{ + .descr = "global data test #1", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_test1_map", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 1, + .value_type_id = 5, + .max_entries = 4, +}, +/* + * struct A { + * unsigned long long m; + * int n; + * char o; + * [3 bytes hole] + * int p[8]; + * }; + * static struct A t; <- in .bss + */ +{ + .descr = "global data test #2", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* .bss section */ /* [7] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48), + BTF_VAR_SECINFO_ENC(6, 0, 48), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"), + .map_type = 
BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 0, + .value_type_id = 7, + .max_entries = 1, +}, +{ + .descr = "global data test #3", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0t\0.bss", + .str_sec_size = sizeof("\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 3, + .max_entries = 1, +}, +{ + .descr = "global data test #4, unsupported linkage", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_TBD, 1, 2), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0t\0.bss", + .str_sec_size = sizeof("\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 3, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Linkage not supported", +}, +{ + .descr = "global data test #5, invalid var type", + .raw_types = { + /* static void t */ + BTF_VAR_ENC(NAME_TBD, 0, 0), /* [1] */ + /* .bss section */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(1, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0t\0.bss", + .str_sec_size = sizeof("\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "global data test #6, invalid var type (fwd type)", + .raw_types = { + /* union A */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */ + /* static union A t */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0.bss", + .str_sec_size = sizeof("\0A\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type", +}, +{ + .descr = "global data test #7, invalid var type (fwd type)", + .raw_types = { + /* union A */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */ + /* static union A t */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(1, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0.bss", + .str_sec_size = sizeof("\0A\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type", +}, +{ + .descr = "global data test #8, invalid var size", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + 
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* .bss section */ /* [7] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48), + BTF_VAR_SECINFO_ENC(6, 0, 47), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 0, + .value_type_id = 7, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid size", +}, +{ + .descr = "global data test #9, invalid var size", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* .bss section */ /* [7] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46), + BTF_VAR_SECINFO_ENC(6, 0, 48), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 0, + .value_type_id = 7, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid size", +}, +{ + .descr = "global data test #10, invalid var size", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* .bss section */ /* [7] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46), + BTF_VAR_SECINFO_ENC(6, 0, 46), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 0, + .value_type_id = 7, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid size", +}, +{ + .descr = "global data test #11, multiple section members", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 
8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* static int u */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */ + /* .bss section */ /* [8] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62), + BTF_VAR_SECINFO_ENC(6, 10, 48), + BTF_VAR_SECINFO_ENC(7, 58, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 62, + .key_type_id = 0, + .value_type_id = 8, + .max_entries = 1, +}, +{ + .descr = "global data test #12, invalid offset", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* static int u */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */ + /* .bss section */ /* [8] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62), + BTF_VAR_SECINFO_ENC(6, 10, 48), + BTF_VAR_SECINFO_ENC(7, 60, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 62, + .key_type_id = 0, + .value_type_id = 8, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid offset+size", +}, +{ + .descr = "global data test #13, invalid offset", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* static int u */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */ + /* .bss section */ /* [8] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62), + BTF_VAR_SECINFO_ENC(6, 10, 48), + BTF_VAR_SECINFO_ENC(7, 12, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 62, + .key_type_id = 0, + .value_type_id = 8, + 
.max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid offset", +}, +{ + .descr = "global data test #14, invalid offset", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* unsigned long long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + /* char */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */ + /* int[8] */ + BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/ + BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */ + /* } */ + /* static struct A t */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + /* static int u */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */ + /* .bss section */ /* [8] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62), + BTF_VAR_SECINFO_ENC(7, 58, 4), + BTF_VAR_SECINFO_ENC(6, 10, 48), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss", + .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 62, + .key_type_id = 0, + .value_type_id = 8, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid offset", +}, +{ + .descr = "global data test #15, not var kind", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(1, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0.bss", + .str_sec_size = sizeof("\0A\0t\0.bss"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 3, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Not a VAR kind member", +}, +{ + .descr = "global data test #16, invalid var referencing sec", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */ + /* a section */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(3, 0, 4), + /* a section */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(6, 0, 4), + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "global data test #17, invalid var referencing var", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */ + /* a section */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(3, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = 
"global data test #18, invalid var loop", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 2, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0aaa", + .str_sec_size = sizeof("\0A\0t\0aaa"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "global data test #19, invalid var referencing var", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 3, 0), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */ + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "global data test #20, invalid ptr referencing var", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* PTR type_id=3 */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */ + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "global data test #21, var included in struct", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* struct A { */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* VAR type_id=3; */ + /* } */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */ + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid member", +}, +{ + .descr = "global data test #22, array of var", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ARRAY_ENC(3, 1, 4), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */ + BTF_END_RAW, + }, + .str_sec = "\0A\0t\0s\0a\0a", + .str_sec_size = sizeof("\0A\0t\0s\0a\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 0, + .value_type_id = 4, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid elem", +}, +{ + .descr = "var after datasec, ptr followed by modifier", + .raw_types = { + /* .bss section */ /* [1] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), + sizeof(void*)+4), + BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)), + BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4), + /* int */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int* */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), + BTF_VAR_ENC(NAME_TBD, 3, 0), /* [4] */ + /* const int */ /* [5] */ + 
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2), + BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0b\0c\0", + .str_sec_size = sizeof("\0a\0b\0c\0"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = ".bss", + .key_size = sizeof(int), + .value_size = sizeof(void*)+4, + .key_type_id = 0, + .value_type_id = 1, + .max_entries = 1, +}, +/* Test member exceeds the size of struct. + * + * struct A { + * int m; + * int n; + * }; + */ +{ + .descr = "size check test #1", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* struct A { */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2 - 1), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 1, 32),/* int n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n", + .str_sec_size = sizeof("\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check1_map", + .key_size = sizeof(int), + .value_size = 1, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, + +/* Test member exceeds the size of struct + * + * struct A { + * int m; + * int n[2]; + * }; + */ +{ + .descr = "size check test #2", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)), + /* int[2] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(1, 1, 2), + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 3 - 1), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* int n[2]; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n", + .str_sec_size = sizeof("\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check2_map", + .key_size = sizeof(int), + .value_size = 1, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, + +/* Test member exceeds the size of struct + * + * struct A { + * int m; + * void *n; + * }; + */ +{ + .descr = "size check test #3", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)), + /* void* */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) + sizeof(void *) - 1), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* void *n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0n", + .str_sec_size = sizeof("\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check3_map", + .key_size = sizeof(int), + .value_size = 1, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, + +/* Test member exceeds the size of struct + * + * enum E { + * E0, + * E1, + * }; + * + * struct A { + * int m; + * enum E n; + * }; + */ +{ + .descr = "size check test #4", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)), + /* enum E { */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), sizeof(int)), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + /* } */ + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2 - 1), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* enum E n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = 
"\0E\0E0\0E1\0A\0m\0n", + .str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check4_map", + .key_size = sizeof(int), + .value_size = 1, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, + +/* Test member unexceeds the size of struct + * + * enum E { + * E0, + * E1, + * }; + * + * struct A { + * char m; + * enum E __attribute__((packed)) n; + * }; + */ +{ + .descr = "size check test #5", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)), + /* char */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), + /* enum E { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + /* } */ + /* struct A { */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2), + BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* char m; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 8),/* enum E __attribute__((packed)) n; */ + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0E\0E0\0E1\0A\0m\0n", + .str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "size_check5_map", + .key_size = sizeof(int), + .value_size = 2, + .key_type_id = 1, + .value_type_id = 4, + .max_entries = 4, +}, + +/* typedef const void * const_void_ptr; + * struct A { + * const_void_ptr m; + * }; + */ +{ + .descr = "void test #1", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + /* const void* */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), + /* typedef const void * const_void_ptr */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ + /* struct A { */ /* [5] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), + /* const_void_ptr m; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 0), + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0const_void_ptr\0A\0m", + .str_sec_size = sizeof("\0const_void_ptr\0A\0m"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "void_test1_map", + .key_size = sizeof(int), + .value_size = sizeof(void *), + .key_type_id = 1, + .value_type_id = 4, + .max_entries = 4, +}, + +/* struct A { + * const void m; + * }; + */ +{ + .descr = "void test #2", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + /* struct A { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 8), + /* const void m; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + /* } */ + BTF_END_RAW, + }, + .str_sec = "\0A\0m", + .str_sec_size = sizeof("\0A\0m"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "void_test2_map", + .key_size = sizeof(int), + .value_size = sizeof(void *), + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member", +}, + +/* typedef const void * const_void_ptr; + * const_void_ptr[4] + */ +{ + .descr = "void test #3", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + /* const void* */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), + /* typedef const void * const_void_ptr */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */ + /* const_void_ptr[4] */ + BTF_TYPE_ARRAY_ENC(4, 1, 4), 
/* [5] */ + BTF_END_RAW, + }, + .str_sec = "\0const_void_ptr", + .str_sec_size = sizeof("\0const_void_ptr"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "void_test3_map", + .key_size = sizeof(int), + .value_size = sizeof(void *) * 4, + .key_type_id = 1, + .value_type_id = 5, + .max_entries = 4, +}, + +/* const void[4] */ +{ + .descr = "void test #4", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + /* const void[4] */ /* [3] */ + BTF_TYPE_ARRAY_ENC(2, 1, 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m", + .str_sec_size = sizeof("\0A\0m"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "void_test4_map", + .key_size = sizeof(int), + .value_size = sizeof(void *) * 4, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid elem", +}, + +/* Array_A <------------------+ + * elem_type == Array_B | + * | | + * | | + * Array_B <-------- + | + * elem_type == Array A --+ + */ +{ + .descr = "loop test #1", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* Array_A */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 1, 8), + /* Array_B */ /* [3] */ + BTF_TYPE_ARRAY_ENC(2, 1, 8), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test1_map", + .key_size = sizeof(int), + .value_size = sizeof(sizeof(int) * 8), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +/* typedef is _before_ the BTF type of Array_A and Array_B + * + * typedef Array_B int_array; + * + * Array_A <------------------+ + * elem_type == int_array | + * | | + * | | + * Array_B <-------- + | + * elem_type == Array_A --+ + */ +{ + .descr = "loop test #2", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* typedef Array_B int_array */ + BTF_TYPEDEF_ENC(1, 4), /* [2] */ + /* Array_A */ + BTF_TYPE_ARRAY_ENC(2, 1, 8), /* [3] */ + /* Array_B */ + BTF_TYPE_ARRAY_ENC(3, 1, 8), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0int_array\0", + .str_sec_size = sizeof("\0int_array"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test2_map", + .key_size = sizeof(int), + .value_size = sizeof(sizeof(int) * 8), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +/* Array_A <------------------+ + * elem_type == Array_B | + * | | + * | | + * Array_B <-------- + | + * elem_type == Array_A --+ + */ +{ + .descr = "loop test #3", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* Array_A */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 1, 8), + /* Array_B */ /* [3] */ + BTF_TYPE_ARRAY_ENC(2, 1, 8), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test3_map", + .key_size = sizeof(int), + .value_size = sizeof(sizeof(int) * 8), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +/* typedef is _between_ the BTF type of Array_A and Array_B + * + * typedef Array_B int_array; + * + * Array_A <------------------+ + * elem_type == int_array | + * | | + * | | + * Array_B <-------- + | + * elem_type == Array_A --+ + */ +{ + .descr = "loop test #4", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* 
Array_A */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 1, 8), + /* typedef Array_B int_array */ /* [3] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), + /* Array_B */ /* [4] */ + BTF_TYPE_ARRAY_ENC(2, 1, 8), + BTF_END_RAW, + }, + .str_sec = "\0int_array\0", + .str_sec_size = sizeof("\0int_array"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test4_map", + .key_size = sizeof(int), + .value_size = sizeof(sizeof(int) * 8), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +/* typedef struct B Struct_B + * + * struct A { + * int x; + * Struct_B y; + * }; + * + * struct B { + * int x; + * struct A y; + * }; + */ +{ + .descr = "loop test #5", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* struct A */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* Struct_B y; */ + /* typedef struct B Struct_B */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + /* struct B */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct A y; */ + BTF_END_RAW, + }, + .str_sec = "\0A\0x\0y\0Struct_B\0B\0x\0y", + .str_sec_size = sizeof("\0A\0x\0y\0Struct_B\0B\0x\0y"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test5_map", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +/* struct A { + * int x; + * struct A array_a[4]; + * }; + */ +{ + .descr = "loop test #6", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ARRAY_ENC(3, 1, 4), /* [2] */ + /* struct A */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int x; */ + BTF_MEMBER_ENC(NAME_TBD, 2, 32),/* struct A array_a[4]; */ + BTF_END_RAW, + }, + .str_sec = "\0A\0x\0y", + .str_sec_size = sizeof("\0A\0x\0y"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test6_map", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +{ + .descr = "loop test #7", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* struct A { */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), + /* const void *m; */ + BTF_MEMBER_ENC(NAME_TBD, 3, 0), + /* CONST type_id=3 */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4), + /* PTR type_id=2 */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3), + BTF_END_RAW, + }, + .str_sec = "\0A\0m", + .str_sec_size = sizeof("\0A\0m"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test7_map", + .key_size = sizeof(int), + .value_size = sizeof(void *), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +{ + .descr = "loop test #8", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* struct A { */ /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), + /* const void *m; */ + BTF_MEMBER_ENC(NAME_TBD, 4, 0), + /* struct B { */ /* [3] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)), + /* const void *n; */ + 
BTF_MEMBER_ENC(NAME_TBD, 6, 0), + /* CONST type_id=5 */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 5), + /* PTR type_id=6 */ /* [5] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 6), + /* CONST type_id=7 */ /* [6] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 7), + /* PTR type_id=4 */ /* [7] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 4), + BTF_END_RAW, + }, + .str_sec = "\0A\0m\0B\0n", + .str_sec_size = sizeof("\0A\0m\0B\0n"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "loop_test8_map", + .key_size = sizeof(int), + .value_size = sizeof(void *), + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Loop detected", +}, + +{ + .descr = "string section does not end with null", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int") - 1, + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid string section", +}, + +{ + .descr = "empty string section", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = 0, + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid string section", +}, + +{ + .descr = "empty type section", + .raw_types = { + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "No type found", +}, + +{ + .descr = "btf_header test. Longer hdr_len", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .hdr_len_delta = 4, + .err_str = "Unsupported btf_header", +}, + +{ + .descr = "btf_header test. Gap between hdr and type", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .type_off_delta = 4, + .err_str = "Unsupported section found", +}, + +{ + .descr = "btf_header test. 
Gap between type and str", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .str_off_delta = 4, + .err_str = "Unsupported section found", +}, + +{ + .descr = "btf_header test. Overlap between type and str", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .str_off_delta = -4, + .err_str = "Section overlap found", +}, + +{ + .descr = "btf_header test. Larger BTF size", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .str_len_delta = -4, + .err_str = "Unsupported section found", +}, + +{ + .descr = "btf_header test. Smaller BTF size", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "\0int", + .str_sec_size = sizeof("\0int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "hdr_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .str_len_delta = 4, + .err_str = "Total section length too long", +}, + +{ + .descr = "array test. index_type/elem_type \"int\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(1, 1, 16), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "array test. index_type/elem_type \"const int\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 3, 16), + /* CONST type_id=1 */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "array test. 
index_type \"const int:31\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int:31 */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 31, 4), + /* int[16] */ /* [3] */ + BTF_TYPE_ARRAY_ENC(1, 4, 16), + /* CONST type_id=2 */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid index", +}, + +{ + .descr = "array test. elem_type \"const int:31\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int:31 */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 31, 4), + /* int[16] */ /* [3] */ + BTF_TYPE_ARRAY_ENC(4, 1, 16), + /* CONST type_id=2 */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid array of int", +}, + +{ + .descr = "array test. index_type \"void\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(1, 0, 16), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid index", +}, + +{ + .descr = "array test. index_type \"const void\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(1, 3, 16), + /* CONST type_id=0 (void) */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid index", +}, + +{ + .descr = "array test. elem_type \"const void\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 1, 16), + /* CONST type_id=0 (void) */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid elem", +}, + +{ + .descr = "array test. 
elem_type \"const void *\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void *[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 1, 16), + /* CONST type_id=4 */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4), + /* void* */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "array test. index_type \"const void *\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* const void *[16] */ /* [2] */ + BTF_TYPE_ARRAY_ENC(3, 3, 16), + /* CONST type_id=4 */ /* [3] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 4), + /* void* */ /* [4] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid index", +}, + +{ + .descr = "array test. t->size != 0\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* int[16] */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 1), + BTF_ARRAY_ENC(1, 1, 16), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "size != 0", +}, + +{ + .descr = "int test. invalid int_data", + .raw_types = { + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), 4), + 0x10000000, + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid int_data", +}, + +{ + .descr = "invalid BTF_INFO", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_ENC(0, 0x20000000, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info", +}, + +{ + .descr = "fwd test. 
t->type != 0\"", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* fwd type */ /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 1), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "fwd_test_map", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "type != 0", +}, + +{ + .descr = "typedef (invalid name, name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(0, 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__int", + .str_sec_size = sizeof("\0__int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "typedef_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "typedef (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__!int", + .str_sec_size = sizeof("\0__!int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "typedef_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "ptr type (invalid name, name_off <> 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__int", + .str_sec_size = sizeof("\0__int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "ptr_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "volatile type (invalid name, name_off <> 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__int", + .str_sec_size = sizeof("\0__int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "volatile_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "const type (invalid name, name_off <> 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__int", + .str_sec_size = sizeof("\0__int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "const_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "restrict type (invalid name, name_off <> 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */ + BTF_END_RAW, + }, + .str_sec = "\0__int", + .str_sec_size = sizeof("\0__int"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = 
"restrict_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "fwd type (invalid name, name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__skb", + .str_sec_size = sizeof("\0__skb"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "fwd_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "fwd type (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0__!skb", + .str_sec_size = sizeof("\0__!skb"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "fwd_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "array type (invalid name, name_off <> 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */ + BTF_ARRAY_ENC(1, 1, 4), + BTF_END_RAW, + }, + .str_sec = "\0__skb", + .str_sec_size = sizeof("\0__skb"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "struct type (name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_END_RAW, + }, + .str_sec = "\0A", + .str_sec_size = sizeof("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct type (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_END_RAW, + }, + .str_sec = "\0A!\0B", + .str_sec_size = sizeof("\0A!\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "struct member (name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_END_RAW, + }, + .str_sec = "\0A", + .str_sec_size = sizeof("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct member (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 
+ BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_END_RAW, + }, + .str_sec = "\0A\0B*", + .str_sec_size = sizeof("\0A\0B*"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "enum type (name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), + sizeof(int)), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_END_RAW, + }, + .str_sec = "\0A\0B", + .str_sec_size = sizeof("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "enum_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "enum type (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), + sizeof(int)), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_END_RAW, + }, + .str_sec = "\0A!\0B", + .str_sec_size = sizeof("\0A!\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "enum_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "enum member (invalid name, name_off = 0)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), + sizeof(int)), /* [2] */ + BTF_ENUM_ENC(0, 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "enum_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "enum member (invalid name, invalid identifier)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), + sizeof(int)), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_END_RAW, + }, + .str_sec = "\0A!", + .str_sec_size = sizeof("\0A!"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "enum_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, +{ + .descr = "arraymap invalid btf key (a bit field)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* 32 bit int with 32 bit offset */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 2, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf key (!= 32 bits)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* 16 bit int with 0 bit offset */ /* [2] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = 
"array_map_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 2, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf value (too small)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + /* btf_value_size < map->value_size */ + .value_size = sizeof(__u64), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "arraymap invalid btf value (too big)", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_map_check_btf", + .key_size = sizeof(int), + /* btf_value_size > map->value_size */ + .value_size = sizeof(__u16), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .map_create_err = true, +}, + +{ + .descr = "func proto (int (*)(int, unsigned int))", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* int (*)(int, unsigned int) */ + BTF_FUNC_PROTO_ENC(1, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (vararg)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int, unsigned int, ...) */ + BTF_FUNC_PROTO_ENC(0, 3), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_FUNC_PROTO_ARG_ENC(0, 0), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (vararg with name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int b, ... 
c) */ + BTF_FUNC_PROTO_ENC(0, 3), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 0), + BTF_END_RAW, + }, + .str_sec = "\0a\0b\0c", + .str_sec_size = sizeof("\0a\0b\0c"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#3", +}, + +{ + .descr = "func proto (arg after vararg)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, ..., unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 3), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 0), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_END_RAW, + }, + .str_sec = "\0a\0b", + .str_sec_size = sizeof("\0a\0b"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#2", +}, + +{ + .descr = "func proto (CONST=>TYPEDEF=>PTR=>FUNC_PROTO)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* typedef void (*func_ptr)(int, unsigned int) */ + BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [3] */ + /* const func_ptr */ + BTF_CONST_ENC(3), /* [4] */ + BTF_PTR_ENC(6), /* [5] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [6] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_END_RAW, + }, + .str_sec = "\0func_ptr", + .str_sec_size = sizeof("\0func_ptr"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (TYPEDEF=>FUNC_PROTO)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_END_RAW, + }, + .str_sec = "\0func_typedef", + .str_sec_size = sizeof("\0func_typedef"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (btf_resolve(arg))", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* void (*)(const void *) */ + BTF_FUNC_PROTO_ENC(0, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(0, 3), + BTF_CONST_ENC(4), /* [3] */ + BTF_PTR_ENC(0), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (Not all arg has name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int, unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_END_RAW, + }, + .str_sec = "\0b", + .str_sec_size = sizeof("\0b"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = 
"func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func proto (Bad arg name_off)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int <bad_name_off>) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2), + BTF_END_RAW, + }, + .str_sec = "\0a", + .str_sec_size = sizeof("\0a"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#2", +}, + +{ + .descr = "func proto (Bad arg name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int !!!) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_END_RAW, + }, + .str_sec = "\0a\0!!!", + .str_sec_size = sizeof("\0a\0!!!"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#2", +}, + +{ + .descr = "func proto (Invalid return type)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* <bad_ret_type> (*)(int, unsigned int) */ + BTF_FUNC_PROTO_ENC(100, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid return type", +}, + +{ + .descr = "func proto (with func name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void func_proto(int, unsigned int) */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 2), 0), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + BTF_END_RAW, + }, + .str_sec = "\0func_proto", + .str_sec_size = sizeof("\0func_proto"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "func proto (const void arg)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(const void) */ + BTF_FUNC_PROTO_ENC(0, 1), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(0, 4), + BTF_CONST_ENC(0), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#1", +}, + +{ + .descr = "func (void func(int a, unsigned int b))", + .raw_types = { + BTF_TYPE_INT_ENC(0, 
BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + /* void func(int a, unsigned int b) */ + BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0b\0func", + .str_sec_size = sizeof("\0a\0b\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "func (No func name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + /* void <no_name>(int a, unsigned int b) */ + BTF_FUNC_ENC(0, 3), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0b", + .str_sec_size = sizeof("\0a\0b"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "func (Invalid func name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + /* void !!!(int a, unsigned int b) */ + BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0b\0!!!", + .str_sec_size = sizeof("\0a\0b\0!!!"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid name", +}, + +{ + .descr = "func (Some arg has no name)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(0, 2), + /* void func(int a, unsigned int) */ + BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0func", + .str_sec_size = sizeof("\0a\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid arg#2", +}, + +{ + .descr = "func (Non zero vlen)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ + /* void (*)(int a, unsigned int b) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + /* void func(int a, unsigned int b) */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 2), 3), /* [4] */ + BTF_END_RAW, + }, + .str_sec = "\0a\0b\0func", + .str_sec_size = sizeof("\0a\0b\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid func 
linkage", +}, + +{ + .descr = "func (Not referring to FUNC_PROTO)", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_ENC(NAME_TBD, 1), /* [2] */ + BTF_END_RAW, + }, + .str_sec = "\0func", + .str_sec_size = sizeof("\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, + +{ + .descr = "invalid int kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 1, 0), 4), /* [2] */ + BTF_INT_ENC(0, 0, 32), + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "int_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid ptr kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 1, 0), 1), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "ptr_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid array kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 1, 0), 0), /* [2] */ + BTF_ARRAY_ENC(1, 1, 1), + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "array_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "valid fwd kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "fwd_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "invalid typedef kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, + BTF_INFO_ENC(BTF_KIND_TYPEDEF, 1, 0), 1), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "typedef_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid volatile kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 1, 0), 1), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "volatile_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid const kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 1, 0), 1), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "const_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid restrict kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 1, 0), 1), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "restrict_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid func kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 0), 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 1, 0), 2), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "invalid func_proto kind_flag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 1, 0), 0), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC(""), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "func_proto_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, + +{ + .descr = "valid struct, kind_flag, bitfield_size = 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 0)), + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 32)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid struct, kind_flag, int member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 4)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid union, kind_flag, int member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "union_type_check_btf", + .key_size = sizeof(int), + .value_size = 
sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid struct, kind_flag, enum member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 4)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid union, kind_flag, enum member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "union_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid struct, kind_flag, typedef member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 4)), + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C\0D\0E"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "valid union, kind_flag, typedef member, bitfield_size != 0", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)), + BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 0)), + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C\0D\0E"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "union_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "invalid struct, kind_flag, bitfield_size greater than struct size", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)), + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 20)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + 
.err_str = "Member exceeds struct_size", +}, + +{ + .descr = "invalid struct, kind_flag, bitfield base_type int not regular", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 20, 4), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 20)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member base type", +}, + +{ + .descr = "invalid struct, kind_flag, base_type int not regular", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 12, 4), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 8)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member base type", +}, + +{ + .descr = "invalid union, kind_flag, bitfield_size greater than struct size", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 2), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(8, 0)), + BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "union_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, + +{ + .descr = "invalid struct, kind_flag, int member, bitfield_size = 0, wrong byte alignment", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, + .btf_load_err = true, + .err_str = "Invalid member offset", +}, + +{ + .descr = "invalid struct, kind_flag, enum member, bitfield_size = 0, wrong byte alignment", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries 
= 4, + .btf_load_err = true, + .err_str = "Invalid member offset", +}, + +{ + .descr = "128-bit int", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "int_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, 128-bit int member", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, 120-bit int member bitfield", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, kind_flag, 128-bit int member", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, kind_flag, 120-bit int member bitfield", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, +/* + * typedef int arr_t[16]; + * struct s { + * arr_t *a; + * }; + */ +{ + .descr = "struct->ptr->typedef->array->int size resolution", + .raw_types = { + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_PTR_ENC(3), /* [2] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + BTF_TYPE_ARRAY_ENC(5, 5, 16), /* [4] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0a\0arr_t"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "ptr_mod_chain_size_resolve_map", + .key_size = sizeof(int), + .value_size = sizeof(int) * 16, + .key_type_id = 5 /* int */, + .value_type_id = 3 /* arr_t */, + .max_entries = 4, +}, +/* + * typedef int arr_t[16][8][4]; + * struct s { + * arr_t *a; + * }; + */ +{ + .descr = "struct->ptr->typedef->multi-array->int size resolution", + .raw_types = { + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */ + 
BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_PTR_ENC(3), /* [2] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + BTF_TYPE_ARRAY_ENC(5, 7, 16), /* [4] */ + BTF_TYPE_ARRAY_ENC(6, 7, 8), /* [5] */ + BTF_TYPE_ARRAY_ENC(7, 7, 4), /* [6] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0a\0arr_t"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "multi_arr_size_resolve_map", + .key_size = sizeof(int), + .value_size = sizeof(int) * 16 * 8 * 4, + .key_type_id = 7 /* int */, + .value_type_id = 3 /* arr_t */, + .max_entries = 4, +}, +/* + * typedef int int_t; + * typedef int_t arr3_t[4]; + * typedef arr3_t arr2_t[8]; + * typedef arr2_t arr1_t[16]; + * struct s { + * arr1_t *a; + * }; + */ +{ + .descr = "typedef/multi-arr mix size resolution", + .raw_types = { + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_PTR_ENC(3), /* [2] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ + BTF_TYPE_ARRAY_ENC(5, 10, 16), /* [4] */ + BTF_TYPEDEF_ENC(NAME_TBD, 6), /* [5] */ + BTF_TYPE_ARRAY_ENC(7, 10, 8), /* [6] */ + BTF_TYPEDEF_ENC(NAME_TBD, 8), /* [7] */ + BTF_TYPE_ARRAY_ENC(9, 10, 4), /* [8] */ + BTF_TYPEDEF_ENC(NAME_TBD, 10), /* [9] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [10] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0a\0arr1_t\0arr2_t\0arr3_t\0int_t"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "typedef_arra_mix_size_resolve_map", + .key_size = sizeof(int), + .value_size = sizeof(int) * 16 * 8 * 4, + .key_type_id = 10 /* int */, + .value_type_id = 3 /* arr_t */, + .max_entries = 4, +}, +/* + * elf .rodata section size 4 and btf .rodata section vlen 0. + */ +{ + .descr = "datasec: vlen == 0", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* .rodata section */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 0), 4), + /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0.rodata"), + .map_type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, + +{ + .descr = "float test #1, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [2] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [3] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 8), /* [4] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 12), /* [5] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 16), /* [6] */ + BTF_STRUCT_ENC(NAME_TBD, 5, 48), /* [7] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_MEMBER_ENC(NAME_TBD, 3, 32), + BTF_MEMBER_ENC(NAME_TBD, 4, 64), + BTF_MEMBER_ENC(NAME_TBD, 5, 128), + BTF_MEMBER_ENC(NAME_TBD, 6, 256), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0_Float16\0float\0double\0_Float80\0long_double" + "\0floats\0a\0b\0c\0d\0e"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 48, + .key_type_id = 1, + .value_type_id = 7, + .max_entries = 1, +}, +{ + .descr = "float test #2, invalid vlen", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 1), 4), + /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0float"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "vlen != 0", +}, +{ + .descr = "float test #3, invalid kind_flag", + .raw_types = { + 
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FLOAT, 1, 0), 4), + /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0float"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, +{ + .descr = "float test #4, member does not fit", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 2), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0float\0floats\0x"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Member exceeds struct_size", +}, +{ + .descr = "float test #5, member is not properly aligned", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 4), /* [2] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 8), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0float\0floats\0x"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Member is not properly aligned", +}, +{ + .descr = "float test #6, invalid size", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* [1] */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 6), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0float"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "float_type_check_btf", + .key_size = sizeof(int), + .value_size = 6, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_size", +}, + +{ + .descr = "decl_tag test #1, struct/member, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #2, union/member, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #3, variable, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + 
BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0global\0tag1\0tag2"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #4, func/parameter, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, -1), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), + BTF_DECL_TAG_ENC(NAME_TBD, 3, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #5, invalid value", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_DECL_TAG_ENC(0, 2, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid value", +}, +{ + .descr = "decl_tag test #6, invalid target type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_DECL_TAG_ENC(NAME_TBD, 1, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type", +}, +{ + .descr = "decl_tag test #7, invalid vlen", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 1), 2), (0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "vlen != 0", +}, +{ + .descr = "decl_tag test #8, invalid kflag", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 1, 0), 2), (-1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag1"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid btf_info kind_flag", +}, +{ + .descr = "decl_tag test #9, var, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #10, struct member, invalid component_idx", + .raw_types = { + 
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #11, func parameter, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, 2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #12, < -1 component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, -2), + BTF_END_RAW, + }, + BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #13, typedef, well-formed", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "decl_tag test #14, typedef, invalid component_idx", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0local\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid component_idx", +}, +{ + .descr = "decl_tag test #15, func, invalid func proto", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), /* [2] */ + BTF_FUNC_ENC(NAME_TBD, 8), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ + .descr = "decl_tag test #16, func proto, return type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), 2), (-1), /* [3] */ + BTF_FUNC_PROTO_ENC(3, 0), /* [4] */ + BTF_END_RAW, + }, + 
BTF_STR_SEC("\0local\0tag1"), + .btf_load_err = true, + .err_str = "Invalid return type", +}, +{ + .descr = "type_tag test #1", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "type_tag test #2, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_CONST_ENC(3), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #3, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #4, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #5, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(1), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "type_tag test #6, type tag order", + .raw_types = { + BTF_PTR_ENC(2), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */ + BTF_PTR_ENC(6), /* [5] */ + BTF_CONST_ENC(2), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "enum64 test #1, unsigned, size 8", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [2] */ + BTF_ENUM64_ENC(NAME_TBD, 0, 0), + BTF_ENUM64_ENC(NAME_TBD, 1, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0a\0b\0c"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = 
sizeof(int), + .value_size = 8, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, +{ + .descr = "enum64 test #2, signed, size 4", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 1, 2), 4), /* [2] */ + BTF_ENUM64_ENC(NAME_TBD, -1, 0), + BTF_ENUM64_ENC(NAME_TBD, 1, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0a\0b\0c"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 2, + .max_entries = 1, +}, + +}; /* struct btf_raw_test raw_tests[] */ + +static const char *get_next_str(const char *start, const char *end) +{ + return start < end - 1 ? start + 1 : NULL; +} + +static int get_raw_sec_size(const __u32 *raw_types) +{ + int i; + + for (i = MAX_NR_RAW_U32 - 1; + i >= 0 && raw_types[i] != BTF_END_RAW; + i--) + ; + + return i < 0 ? i : i * sizeof(raw_types[0]); +} + +static void *btf_raw_create(const struct btf_header *hdr, + const __u32 *raw_types, + const char *str, + unsigned int str_sec_size, + unsigned int *btf_size, + const char **ret_next_str) +{ + const char *next_str = str, *end_str = str + str_sec_size; + const char **strs_idx = NULL, **tmp_strs_idx; + int strs_cap = 0, strs_cnt = 0, next_str_idx = 0; + unsigned int size_needed, offset; + struct btf_header *ret_hdr; + int i, type_sec_size, err = 0; + uint32_t *ret_types; + void *raw_btf = NULL; + + type_sec_size = get_raw_sec_size(raw_types); + if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types")) + return NULL; + + size_needed = sizeof(*hdr) + type_sec_size + str_sec_size; + raw_btf = malloc(size_needed); + if (CHECK(!raw_btf, "Cannot allocate memory for raw_btf")) + return NULL; + + /* Copy header */ + memcpy(raw_btf, hdr, sizeof(*hdr)); + offset = sizeof(*hdr); + + /* Index strings */ + while ((next_str = get_next_str(next_str, end_str))) { + if (strs_cnt == strs_cap) { + strs_cap += max(16, strs_cap / 2); + tmp_strs_idx = realloc(strs_idx, + sizeof(*strs_idx) * strs_cap); + if (CHECK(!tmp_strs_idx, + "Cannot allocate memory for strs_idx")) { + err = -1; + goto done; + } + strs_idx = tmp_strs_idx; + } + strs_idx[strs_cnt++] = next_str; + next_str += strlen(next_str); + } + + /* Copy type section */ + ret_types = raw_btf + offset; + for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) { + if (raw_types[i] == NAME_TBD) { + if (CHECK(next_str_idx == strs_cnt, + "Error in getting next_str #%d", + next_str_idx)) { + err = -1; + goto done; + } + ret_types[i] = strs_idx[next_str_idx++] - str; + } else if (IS_NAME_NTH(raw_types[i])) { + int idx = GET_NAME_NTH_IDX(raw_types[i]); + + if (CHECK(idx <= 0 || idx > strs_cnt, + "Error getting string #%d, strs_cnt:%d", + idx, strs_cnt)) { + err = -1; + goto done; + } + ret_types[i] = strs_idx[idx-1] - str; + } else { + ret_types[i] = raw_types[i]; + } + } + offset += type_sec_size; + + /* Copy string section */ + memcpy(raw_btf + offset, str, str_sec_size); + + ret_hdr = (struct btf_header *)raw_btf; + ret_hdr->type_len = type_sec_size; + ret_hdr->str_off = type_sec_size; + ret_hdr->str_len = str_sec_size; + + *btf_size = size_needed; + if (ret_next_str) + *ret_next_str = + next_str_idx < strs_cnt ? 
strs_idx[next_str_idx] : NULL; + +done: + free(strs_idx); + if (err) { + free(raw_btf); + return NULL; + } + return raw_btf; +} + +static int load_raw_btf(const void *raw_data, size_t raw_size) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + int btf_fd; + + if (always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + } + + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + if (btf_fd < 0 && !always_log) { + opts.log_buf = btf_log_buf, + opts.log_size = BTF_LOG_BUF_SIZE, + opts.log_level = 1; + btf_fd = bpf_btf_load(raw_data, raw_size, &opts); + } + + return btf_fd; +} + +static void do_test_raw(unsigned int test_num) +{ + struct btf_raw_test *test = &raw_tests[test_num - 1]; + LIBBPF_OPTS(bpf_map_create_opts, opts); + int map_fd = -1, btf_fd = -1; + unsigned int raw_btf_size; + struct btf_header *hdr; + void *raw_btf; + int err; + + if (!test__start_subtest(test->descr)) + return; + + raw_btf = btf_raw_create(&hdr_tmpl, + test->raw_types, + test->str_sec, + test->str_sec_size, + &raw_btf_size, NULL); + if (!raw_btf) + return; + + hdr = raw_btf; + + hdr->hdr_len = (int)hdr->hdr_len + test->hdr_len_delta; + hdr->type_off = (int)hdr->type_off + test->type_off_delta; + hdr->str_off = (int)hdr->str_off + test->str_off_delta; + hdr->str_len = (int)hdr->str_len + test->str_len_delta; + + *btf_log_buf = '\0'; + btf_fd = load_raw_btf(raw_btf, raw_btf_size); + free(raw_btf); + + err = ((btf_fd < 0) != test->btf_load_err); + if (CHECK(err, "btf_fd:%d test->btf_load_err:%u", + btf_fd, test->btf_load_err) || + CHECK(test->err_str && !strstr(btf_log_buf, test->err_str), + "expected err_str:%s\n", test->err_str)) { + err = -1; + goto done; + } + + if (err || btf_fd < 0) + goto done; + + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); + + err = ((map_fd < 0) != test->map_create_err); + CHECK(err, "map_fd:%d test->map_create_err:%u", + map_fd, test->map_create_err); + +done: + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + if (btf_fd >= 0) + close(btf_fd); + if (map_fd >= 0) + close(map_fd); +} + +struct btf_get_info_test { + const char *descr; + const char *str_sec; + __u32 raw_types[MAX_NR_RAW_U32]; + __u32 str_sec_size; + int btf_size_delta; + int (*special_test)(unsigned int test_num); +}; + +static int test_big_btf_info(unsigned int test_num); +static int test_btf_id(unsigned int test_num); + +const struct btf_get_info_test get_info_tests[] = { +{ + .descr = "== raw_btf_size+1", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .btf_size_delta = 1, +}, +{ + .descr = "== raw_btf_size-3", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .btf_size_delta = -3, +}, +{ + .descr = "Large bpf_btf_info", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = sizeof(""), + .special_test = test_big_btf_info, +}, +{ + .descr = "BTF ID", + .raw_types = { + /* int */ /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), + /* unsigned int */ /* [2] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), + BTF_END_RAW, + }, + .str_sec = "", + .str_sec_size = 
sizeof(""), + .special_test = test_btf_id, +}, +}; + +static int test_big_btf_info(unsigned int test_num) +{ + const struct btf_get_info_test *test = &get_info_tests[test_num - 1]; + uint8_t *raw_btf = NULL, *user_btf = NULL; + unsigned int raw_btf_size; + struct { + struct bpf_btf_info info; + uint64_t garbage; + } info_garbage; + struct bpf_btf_info *info; + int btf_fd = -1, err; + uint32_t info_len; + + raw_btf = btf_raw_create(&hdr_tmpl, + test->raw_types, + test->str_sec, + test->str_sec_size, + &raw_btf_size, NULL); + + if (!raw_btf) + return -1; + + *btf_log_buf = '\0'; + + user_btf = malloc(raw_btf_size); + if (CHECK(!user_btf, "!user_btf")) { + err = -1; + goto done; + } + + btf_fd = load_raw_btf(raw_btf, raw_btf_size); + if (CHECK(btf_fd < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + /* + * GET_INFO should error out if the userspace info + * has non zero tailing bytes. + */ + info = &info_garbage.info; + memset(info, 0, sizeof(*info)); + info_garbage.garbage = 0xdeadbeef; + info_len = sizeof(info_garbage); + info->btf = ptr_to_u64(user_btf); + info->btf_size = raw_btf_size; + + err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); + if (CHECK(!err, "!err")) { + err = -1; + goto done; + } + + /* + * GET_INFO should succeed even info_len is larger than + * the kernel supported as long as tailing bytes are zero. + * The kernel supported info len should also be returned + * to userspace. + */ + info_garbage.garbage = 0; + err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); + if (CHECK(err || info_len != sizeof(*info), + "err:%d errno:%d info_len:%u sizeof(*info):%zu", + err, errno, info_len, sizeof(*info))) { + err = -1; + goto done; + } + + fprintf(stderr, "OK"); + +done: + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + + free(raw_btf); + free(user_btf); + + if (btf_fd >= 0) + close(btf_fd); + + return err; +} + +static int test_btf_id(unsigned int test_num) +{ + const struct btf_get_info_test *test = &get_info_tests[test_num - 1]; + LIBBPF_OPTS(bpf_map_create_opts, opts); + uint8_t *raw_btf = NULL, *user_btf[2] = {}; + int btf_fd[2] = {-1, -1}, map_fd = -1; + struct bpf_map_info map_info = {}; + struct bpf_btf_info info[2] = {}; + unsigned int raw_btf_size; + uint32_t info_len; + int err, i, ret; + + raw_btf = btf_raw_create(&hdr_tmpl, + test->raw_types, + test->str_sec, + test->str_sec_size, + &raw_btf_size, NULL); + + if (!raw_btf) + return -1; + + *btf_log_buf = '\0'; + + for (i = 0; i < 2; i++) { + user_btf[i] = malloc(raw_btf_size); + if (CHECK(!user_btf[i], "!user_btf[%d]", i)) { + err = -1; + goto done; + } + info[i].btf = ptr_to_u64(user_btf[i]); + info[i].btf_size = raw_btf_size; + } + + btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size); + if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + /* Test BPF_OBJ_GET_INFO_BY_ID on btf_id */ + info_len = sizeof(info[0]); + err = bpf_obj_get_info_by_fd(btf_fd[0], &info[0], &info_len); + if (CHECK(err, "errno:%d", errno)) { + err = -1; + goto done; + } + + btf_fd[1] = bpf_btf_get_fd_by_id(info[0].id); + if (CHECK(btf_fd[1] < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + ret = 0; + err = bpf_obj_get_info_by_fd(btf_fd[1], &info[1], &info_len); + if (CHECK(err || info[0].id != info[1].id || + info[0].btf_size != info[1].btf_size || + (ret = memcmp(user_btf[0], user_btf[1], info[0].btf_size)), + "err:%d errno:%d id0:%u id1:%u btf_size0:%u btf_size1:%u memcmp:%d", + err, errno, info[0].id, info[1].id, + info[0].btf_size, info[1].btf_size, ret)) { + 
err = -1; + goto done; + } + + /* Test btf members in struct bpf_map_info */ + opts.btf_fd = btf_fd[0]; + opts.btf_key_type_id = 1; + opts.btf_value_type_id = 2; + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id", + sizeof(int), sizeof(int), 4, &opts); + if (CHECK(map_fd < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + info_len = sizeof(map_info); + err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); + if (CHECK(err || map_info.btf_id != info[0].id || + map_info.btf_key_type_id != 1 || map_info.btf_value_type_id != 2, + "err:%d errno:%d info.id:%u btf_id:%u btf_key_type_id:%u btf_value_type_id:%u", + err, errno, info[0].id, map_info.btf_id, map_info.btf_key_type_id, + map_info.btf_value_type_id)) { + err = -1; + goto done; + } + + for (i = 0; i < 2; i++) { + close(btf_fd[i]); + btf_fd[i] = -1; + } + + /* Test BTF ID is removed from the kernel */ + btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id); + if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + close(btf_fd[0]); + btf_fd[0] = -1; + + /* The map holds the last ref to BTF and its btf_id */ + close(map_fd); + map_fd = -1; + btf_fd[0] = bpf_btf_get_fd_by_id(map_info.btf_id); + if (CHECK(btf_fd[0] >= 0, "BTF lingers")) { + err = -1; + goto done; + } + + fprintf(stderr, "OK"); + +done: + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + + free(raw_btf); + if (map_fd >= 0) + close(map_fd); + for (i = 0; i < 2; i++) { + free(user_btf[i]); + if (btf_fd[i] >= 0) + close(btf_fd[i]); + } + + return err; +} + +static void do_test_get_info(unsigned int test_num) +{ + const struct btf_get_info_test *test = &get_info_tests[test_num - 1]; + unsigned int raw_btf_size, user_btf_size, expected_nbytes; + uint8_t *raw_btf = NULL, *user_btf = NULL; + struct bpf_btf_info info = {}; + int btf_fd = -1, err, ret; + uint32_t info_len; + + if (!test__start_subtest(test->descr)) + return; + + if (test->special_test) { + err = test->special_test(test_num); + if (CHECK(err, "failed: %d\n", err)) + return; + } + + raw_btf = btf_raw_create(&hdr_tmpl, + test->raw_types, + test->str_sec, + test->str_sec_size, + &raw_btf_size, NULL); + + if (!raw_btf) + return; + + *btf_log_buf = '\0'; + + user_btf = malloc(raw_btf_size); + if (CHECK(!user_btf, "!user_btf")) { + err = -1; + goto done; + } + + btf_fd = load_raw_btf(raw_btf, raw_btf_size); + if (CHECK(btf_fd <= 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + user_btf_size = (int)raw_btf_size + test->btf_size_delta; + expected_nbytes = min(raw_btf_size, user_btf_size); + if (raw_btf_size > expected_nbytes) + memset(user_btf + expected_nbytes, 0xff, + raw_btf_size - expected_nbytes); + + info_len = sizeof(info); + info.btf = ptr_to_u64(user_btf); + info.btf_size = user_btf_size; + + ret = 0; + err = bpf_obj_get_info_by_fd(btf_fd, &info, &info_len); + if (CHECK(err || !info.id || info_len != sizeof(info) || + info.btf_size != raw_btf_size || + (ret = memcmp(raw_btf, user_btf, expected_nbytes)), + "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d", + err, errno, info.id, info_len, sizeof(info), + raw_btf_size, info.btf_size, expected_nbytes, ret)) { + err = -1; + goto done; + } + + while (expected_nbytes < raw_btf_size) { + fprintf(stderr, "%u...", expected_nbytes); + if (CHECK(user_btf[expected_nbytes++] != 0xff, + "user_btf[%u]:%x != 0xff", expected_nbytes - 1, + user_btf[expected_nbytes - 1])) { + err = -1; + goto done; + } + } + + fprintf(stderr, "OK"); + 
+done: + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + + free(raw_btf); + free(user_btf); + + if (btf_fd >= 0) + close(btf_fd); +} + +struct btf_file_test { + const char *file; + bool btf_kv_notfound; +}; + +static struct btf_file_test file_tests[] = { + { .file = "test_btf_newkv.bpf.o", }, + { .file = "test_btf_nokv.bpf.o", .btf_kv_notfound = true, }, +}; + +static void do_test_file(unsigned int test_num) +{ + const struct btf_file_test *test = &file_tests[test_num - 1]; + const char *expected_fnames[] = {"_dummy_tracepoint", + "test_long_fname_1", + "test_long_fname_2"}; + struct btf_ext *btf_ext = NULL; + struct bpf_prog_info info = {}; + struct bpf_object *obj = NULL; + struct bpf_func_info *finfo; + struct bpf_program *prog; + __u32 info_len, rec_size; + bool has_btf_ext = false; + struct btf *btf = NULL; + void *func_info = NULL; + struct bpf_map *map; + int i, err, prog_fd; + + if (!test__start_subtest(test->file)) + return; + + btf = btf__parse_elf(test->file, &btf_ext); + err = libbpf_get_error(btf); + if (err) { + if (err == -ENOENT) { + printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC); + test__skip(); + return; + } + return; + } + btf__free(btf); + + has_btf_ext = btf_ext != NULL; + btf_ext__free(btf_ext); + + /* temporary disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */ + libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS); + obj = bpf_object__open(test->file); + err = libbpf_get_error(obj); + if (CHECK(err, "obj: %d", err)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (CHECK(!prog, "Cannot find bpf_prog")) { + err = -1; + goto done; + } + + bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT); + err = bpf_object__load(obj); + if (CHECK(err < 0, "bpf_object__load: %d", err)) + goto done; + prog_fd = bpf_program__fd(prog); + + map = bpf_object__find_map_by_name(obj, "btf_map"); + if (CHECK(!map, "btf_map not found")) { + err = -1; + goto done; + } + + err = (bpf_map__btf_key_type_id(map) == 0 || bpf_map__btf_value_type_id(map) == 0) + != test->btf_kv_notfound; + if (CHECK(err, "btf_key_type_id:%u btf_value_type_id:%u test->btf_kv_notfound:%u", + bpf_map__btf_key_type_id(map), bpf_map__btf_value_type_id(map), + test->btf_kv_notfound)) + goto done; + + if (!has_btf_ext) + goto skip; + + /* get necessary program info */ + info_len = sizeof(struct bpf_prog_info); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + + if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) { + fprintf(stderr, "%s\n", btf_log_buf); + err = -1; + goto done; + } + if (CHECK(info.nr_func_info != 3, + "incorrect info.nr_func_info (1st) %d", + info.nr_func_info)) { + err = -1; + goto done; + } + rec_size = info.func_info_rec_size; + if (CHECK(rec_size != sizeof(struct bpf_func_info), + "incorrect info.func_info_rec_size (1st) %d\n", rec_size)) { + err = -1; + goto done; + } + + func_info = malloc(info.nr_func_info * rec_size); + if (CHECK(!func_info, "out of memory")) { + err = -1; + goto done; + } + + /* reset info to only retrieve func_info related data */ + memset(&info, 0, sizeof(info)); + info.nr_func_info = 3; + info.func_info_rec_size = rec_size; + info.func_info = ptr_to_u64(func_info); + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + + if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) { + fprintf(stderr, "%s\n", btf_log_buf); + err = -1; + goto done; + } + if (CHECK(info.nr_func_info != 3, + "incorrect info.nr_func_info (2nd) %d", + info.nr_func_info)) { + err 
= -1; + goto done; + } + if (CHECK(info.func_info_rec_size != rec_size, + "incorrect info.func_info_rec_size (2nd) %d", + info.func_info_rec_size)) { + err = -1; + goto done; + } + + btf = btf__load_from_kernel_by_id(info.btf_id); + err = libbpf_get_error(btf); + if (CHECK(err, "cannot get btf from kernel, err: %d", err)) + goto done; + + /* check three functions */ + finfo = func_info; + for (i = 0; i < 3; i++) { + const struct btf_type *t; + const char *fname; + + t = btf__type_by_id(btf, finfo->type_id); + if (CHECK(!t, "btf__type_by_id failure: id %u", + finfo->type_id)) { + err = -1; + goto done; + } + + fname = btf__name_by_offset(btf, t->name_off); + err = strcmp(fname, expected_fnames[i]); + /* for the second and third functions in .text section, + * the compiler may order them either way. + */ + if (i && err) + err = strcmp(fname, expected_fnames[3 - i]); + if (CHECK(err, "incorrect fname %s", fname ? : "")) { + err = -1; + goto done; + } + + finfo = (void *)finfo + rec_size; + } + +skip: + fprintf(stderr, "OK"); + +done: + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + btf__free(btf); + free(func_info); + bpf_object__close(obj); +} + +const char *pprint_enum_str[] = { + "ENUM_ZERO", + "ENUM_ONE", + "ENUM_TWO", + "ENUM_THREE", +}; + +struct pprint_mapv { + uint32_t ui32; + uint16_t ui16; + /* 2 bytes hole */ + int32_t si32; + uint32_t unused_bits2a:2, + bits28:28, + unused_bits2b:2; + union { + uint64_t ui64; + uint8_t ui8a[8]; + }; + enum { + ENUM_ZERO, + ENUM_ONE, + ENUM_TWO, + ENUM_THREE, + } aenum; + uint32_t ui32b; + uint32_t bits2c:2; + uint8_t si8_4[2][2]; +}; + +#ifdef __SIZEOF_INT128__ +struct pprint_mapv_int128 { + __int128 si128a; + __int128 si128b; + unsigned __int128 bits3:3; + unsigned __int128 bits80:80; + unsigned __int128 ui128; +}; +#endif + +static struct btf_raw_test pprint_test_template[] = { +{ + .raw_types = { + /* unsighed char */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), + /* unsigned short */ /* [2] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), + /* unsigned int */ /* [3] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), + /* int */ /* [4] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* unsigned long long */ /* [5] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), + /* 2 bits */ /* [6] */ + BTF_TYPE_INT_ENC(0, 0, 0, 2, 2), + /* 28 bits */ /* [7] */ + BTF_TYPE_INT_ENC(0, 0, 0, 28, 4), + /* uint8_t[8] */ /* [8] */ + BTF_TYPE_ARRAY_ENC(9, 1, 8), + /* typedef unsigned char uint8_t */ /* [9] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), + /* typedef unsigned short uint16_t */ /* [10] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), + /* typedef unsigned int uint32_t */ /* [11] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), + /* typedef int int32_t */ /* [12] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), + /* typedef unsigned long long uint64_t *//* [13] */ + BTF_TYPEDEF_ENC(NAME_TBD, 5), + /* union (anon) */ /* [14] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */ + BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */ + /* enum (anon) */ /* [15] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_ENUM_ENC(NAME_TBD, 2), + BTF_ENUM_ENC(NAME_TBD, 3), + /* struct pprint_mapv */ /* [16] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 11), 40), + BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ + BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ + BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ + BTF_MEMBER_ENC(NAME_TBD, 6, 96), /* 
unused_bits2a */ + BTF_MEMBER_ENC(NAME_TBD, 7, 98), /* bits28 */ + BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ + BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ + BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */ + BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */ + BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */ + BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"), + .key_size = sizeof(unsigned int), + .value_size = sizeof(struct pprint_mapv), + .key_type_id = 3, /* unsigned int */ + .value_type_id = 16, /* struct pprint_mapv */ + .max_entries = 128, +}, + +{ + /* this type will have the same type as the + * first .raw_types definition, but struct type will + * be encoded with kind_flag set. + */ + .raw_types = { + /* unsighed char */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), + /* unsigned short */ /* [2] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), + /* unsigned int */ /* [3] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), + /* int */ /* [4] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* unsigned long long */ /* [5] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */ + /* uint8_t[8] */ /* [8] */ + BTF_TYPE_ARRAY_ENC(9, 1, 8), + /* typedef unsigned char uint8_t */ /* [9] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), + /* typedef unsigned short uint16_t */ /* [10] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), + /* typedef unsigned int uint32_t */ /* [11] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), + /* typedef int int32_t */ /* [12] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), + /* typedef unsigned long long uint64_t *//* [13] */ + BTF_TYPEDEF_ENC(NAME_TBD, 5), + /* union (anon) */ /* [14] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */ + BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */ + /* enum (anon) */ /* [15] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_ENUM_ENC(NAME_TBD, 2), + BTF_ENUM_ENC(NAME_TBD, 3), + /* struct pprint_mapv */ /* [16] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40), + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ + BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ + BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ + BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */ + BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */ + BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ + BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ + BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ + BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */ + BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */ + BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned 
long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"), + .key_size = sizeof(unsigned int), + .value_size = sizeof(struct pprint_mapv), + .key_type_id = 3, /* unsigned int */ + .value_type_id = 16, /* struct pprint_mapv */ + .max_entries = 128, +}, + +{ + /* this type will have the same layout as the + * first .raw_types definition. The struct type will + * be encoded with kind_flag set, bitfield members + * are added typedef/const/volatile, and bitfield members + * will have both int and enum types. + */ + .raw_types = { + /* unsighed char */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1), + /* unsigned short */ /* [2] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2), + /* unsigned int */ /* [3] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), + /* int */ /* [4] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), + /* unsigned long long */ /* [5] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */ + BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */ + /* uint8_t[8] */ /* [8] */ + BTF_TYPE_ARRAY_ENC(9, 1, 8), + /* typedef unsigned char uint8_t */ /* [9] */ + BTF_TYPEDEF_ENC(NAME_TBD, 1), + /* typedef unsigned short uint16_t */ /* [10] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), + /* typedef unsigned int uint32_t */ /* [11] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), + /* typedef int int32_t */ /* [12] */ + BTF_TYPEDEF_ENC(NAME_TBD, 4), + /* typedef unsigned long long uint64_t *//* [13] */ + BTF_TYPEDEF_ENC(NAME_TBD, 5), + /* union (anon) */ /* [14] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8), + BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */ + BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */ + /* enum (anon) */ /* [15] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4), + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_ENUM_ENC(NAME_TBD, 2), + BTF_ENUM_ENC(NAME_TBD, 3), + /* struct pprint_mapv */ /* [16] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40), + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ + BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ + BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ + BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */ + BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */ + BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ + BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ + BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ + BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */ + BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */ + BTF_MEMBER_ENC(NAME_TBD, 20, BTF_MEMBER_OFFSET(0, 264)), /* si8_4 */ + /* typedef unsigned int ___int */ /* [17] */ + BTF_TYPEDEF_ENC(NAME_TBD, 18), + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ + BTF_TYPE_ARRAY_ENC(21, 1, 2), /* [20] */ + BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [21] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long 
long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int\0si8_4"), + .key_size = sizeof(unsigned int), + .value_size = sizeof(struct pprint_mapv), + .key_type_id = 3, /* unsigned int */ + .value_type_id = 16, /* struct pprint_mapv */ + .max_entries = 128, +}, + +#ifdef __SIZEOF_INT128__ +{ + /* test int128 */ + .raw_types = { + /* unsigned int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), + /* __int128 */ /* [2] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16), + /* unsigned __int128 */ /* [3] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16), + /* struct pprint_mapv_int128 */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"), + .key_size = sizeof(unsigned int), + .value_size = sizeof(struct pprint_mapv_int128), + .key_type_id = 1, + .value_type_id = 4, + .max_entries = 128, + .mapv_kind = PPRINT_MAPV_KIND_INT128, +}, +#endif + +}; + +static struct btf_pprint_test_meta { + const char *descr; + enum bpf_map_type map_type; + const char *map_name; + bool ordered_map; + bool lossless_map; + bool percpu_map; +} pprint_tests_meta[] = { +{ + .descr = "BTF pretty print array", + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "pprint_test_array", + .ordered_map = true, + .lossless_map = true, + .percpu_map = false, +}, + +{ + .descr = "BTF pretty print hash", + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "pprint_test_hash", + .ordered_map = false, + .lossless_map = true, + .percpu_map = false, +}, + +{ + .descr = "BTF pretty print lru hash", + .map_type = BPF_MAP_TYPE_LRU_HASH, + .map_name = "pprint_test_lru_hash", + .ordered_map = false, + .lossless_map = false, + .percpu_map = false, +}, + +{ + .descr = "BTF pretty print percpu array", + .map_type = BPF_MAP_TYPE_PERCPU_ARRAY, + .map_name = "pprint_test_percpu_array", + .ordered_map = true, + .lossless_map = true, + .percpu_map = true, +}, + +{ + .descr = "BTF pretty print percpu hash", + .map_type = BPF_MAP_TYPE_PERCPU_HASH, + .map_name = "pprint_test_percpu_hash", + .ordered_map = false, + .lossless_map = true, + .percpu_map = true, +}, + +{ + .descr = "BTF pretty print lru percpu hash", + .map_type = BPF_MAP_TYPE_LRU_PERCPU_HASH, + .map_name = "pprint_test_lru_percpu_hash", + .ordered_map = false, + .lossless_map = false, + .percpu_map = true, +}, + +}; + +static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind) +{ + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) + return sizeof(struct pprint_mapv); + +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) + return sizeof(struct pprint_mapv_int128); +#endif + + assert(0); +} + +static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind, + void *mapv, uint32_t i, + int num_cpus, int rounded_value_size) +{ + int cpu; + + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) { + struct pprint_mapv *v = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + v->ui32 = i + cpu; + v->si32 = -i; + v->unused_bits2a = 3; + 
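+			/* The remaining members (the bits28/unused_bits2b/bits2c bitfields, the ui64/ui8a union, the anonymous enum and the si8_4 matrix) exercise the pretty-printer on non-trivial layouts. */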
v->bits28 = i; + v->unused_bits2b = 3; + v->ui64 = i; + v->aenum = i & 0x03; + v->ui32b = 4; + v->bits2c = 1; + v->si8_4[0][0] = (cpu + i) & 0xff; + v->si8_4[0][1] = (cpu + i + 1) & 0xff; + v->si8_4[1][0] = (cpu + i + 2) & 0xff; + v->si8_4[1][1] = (cpu + i + 3) & 0xff; + v = (void *)v + rounded_value_size; + } + } + +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) { + struct pprint_mapv_int128 *v = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + v->si128a = i; + v->si128b = -i; + v->bits3 = i & 0x07; + v->bits80 = (((unsigned __int128)1) << 64) + i; + v->ui128 = (((unsigned __int128)2) << 64) + i; + v = (void *)v + rounded_value_size; + } + } +#endif +} + +ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, + char *expected_line, ssize_t line_size, + bool percpu_map, unsigned int next_key, + int cpu, void *mapv) +{ + ssize_t nexpected_line = -1; + + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) { + struct pprint_mapv *v = mapv; + + nexpected_line = snprintf(expected_line, line_size, + "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," + "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," + "%u,0x%x,[[%d,%d],[%d,%d]]}\n", + percpu_map ? "\tcpu" : "", + percpu_map ? cpu : next_key, + v->ui32, v->si32, + v->unused_bits2a, + v->bits28, + v->unused_bits2b, + (__u64)v->ui64, + v->ui8a[0], v->ui8a[1], + v->ui8a[2], v->ui8a[3], + v->ui8a[4], v->ui8a[5], + v->ui8a[6], v->ui8a[7], + pprint_enum_str[v->aenum], + v->ui32b, + v->bits2c, + v->si8_4[0][0], v->si8_4[0][1], + v->si8_4[1][0], v->si8_4[1][1]); + } + +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) { + struct pprint_mapv_int128 *v = mapv; + + nexpected_line = snprintf(expected_line, line_size, + "%s%u: {0x%lx,0x%lx,0x%lx," + "0x%lx%016lx,0x%lx%016lx}\n", + percpu_map ? "\tcpu" : "", + percpu_map ? 
cpu : next_key, + (uint64_t)v->si128a, + (uint64_t)v->si128b, + (uint64_t)v->bits3, + (uint64_t)(v->bits80 >> 64), + (uint64_t)v->bits80, + (uint64_t)(v->ui128 >> 64), + (uint64_t)v->ui128); + } +#endif + + return nexpected_line; +} + +static int check_line(const char *expected_line, int nexpected_line, + int expected_line_len, const char *line) +{ + if (CHECK(nexpected_line == expected_line_len, + "expected_line is too long")) + return -1; + + if (strcmp(expected_line, line)) { + fprintf(stderr, "unexpected pprint output\n"); + fprintf(stderr, "expected: %s", expected_line); + fprintf(stderr, " read: %s", line); + return -1; + } + + return 0; +} + + +static void do_test_pprint(int test_num) +{ + const struct btf_raw_test *test = &pprint_test_template[test_num]; + enum pprint_mapv_kind_t mapv_kind = test->mapv_kind; + LIBBPF_OPTS(bpf_map_create_opts, opts); + bool ordered_map, lossless_map, percpu_map; + int err, ret, num_cpus, rounded_value_size; + unsigned int key, nr_read_elems; + int map_fd = -1, btf_fd = -1; + unsigned int raw_btf_size; + char expected_line[255]; + FILE *pin_file = NULL; + char pin_path[255]; + size_t line_len = 0; + char *line = NULL; + void *mapv = NULL; + uint8_t *raw_btf; + ssize_t nread; + + if (!test__start_subtest(test->descr)) + return; + + raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types, + test->str_sec, test->str_sec_size, + &raw_btf_size, NULL); + + if (!raw_btf) + return; + + *btf_log_buf = '\0'; + btf_fd = load_raw_btf(raw_btf, raw_btf_size); + free(raw_btf); + + if (CHECK(btf_fd < 0, "errno:%d\n", errno)) { + err = -1; + goto done; + } + + opts.btf_fd = btf_fd; + opts.btf_key_type_id = test->key_type_id; + opts.btf_value_type_id = test->value_type_id; + map_fd = bpf_map_create(test->map_type, test->map_name, + test->key_size, test->value_size, test->max_entries, &opts); + if (CHECK(map_fd < 0, "errno:%d", errno)) { + err = -1; + goto done; + } + + ret = snprintf(pin_path, sizeof(pin_path), "%s/%s", + "/sys/fs/bpf", test->map_name); + + if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long", + "/sys/fs/bpf", test->map_name)) { + err = -1; + goto done; + } + + err = bpf_obj_pin(map_fd, pin_path); + if (CHECK(err, "bpf_obj_pin(%s): errno:%d.", pin_path, errno)) + goto done; + + percpu_map = test->percpu_map; + num_cpus = percpu_map ? bpf_num_possible_cpus() : 1; + rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8); + mapv = calloc(num_cpus, rounded_value_size); + if (CHECK(!mapv, "mapv allocation failure")) { + err = -1; + goto done; + } + + for (key = 0; key < test->max_entries; key++) { + set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size); + bpf_map_update_elem(map_fd, &key, mapv, 0); + } + + pin_file = fopen(pin_path, "r"); + if (CHECK(!pin_file, "fopen(%s): errno:%d", pin_path, errno)) { + err = -1; + goto done; + } + + /* Skip lines start with '#' */ + while ((nread = getline(&line, &line_len, pin_file)) > 0 && + *line == '#') + ; + + if (CHECK(nread <= 0, "Unexpected EOF")) { + err = -1; + goto done; + } + + nr_read_elems = 0; + ordered_map = test->ordered_map; + lossless_map = test->lossless_map; + do { + ssize_t nexpected_line; + unsigned int next_key; + void *cmapv; + int cpu; + + next_key = ordered_map ? nr_read_elems : atoi(line); + set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size); + cmapv = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + if (percpu_map) { + /* for percpu map, the format looks like: + * <key>: { + * cpu0: <value_on_cpu0> + * cpu1: <value_on_cpu1> + * ... 
+ * cpun: <value_on_cpun> + * } + * + * let us verify the line containing the key here. + */ + if (cpu == 0) { + nexpected_line = snprintf(expected_line, + sizeof(expected_line), + "%u: {\n", + next_key); + + err = check_line(expected_line, nexpected_line, + sizeof(expected_line), line); + if (err < 0) + goto done; + } + + /* read value@cpu */ + nread = getline(&line, &line_len, pin_file); + if (nread < 0) + break; + } + + nexpected_line = get_pprint_expected_line(mapv_kind, expected_line, + sizeof(expected_line), + percpu_map, next_key, + cpu, cmapv); + err = check_line(expected_line, nexpected_line, + sizeof(expected_line), line); + if (err < 0) + goto done; + + cmapv = cmapv + rounded_value_size; + } + + if (percpu_map) { + /* skip the last bracket for the percpu map */ + nread = getline(&line, &line_len, pin_file); + if (nread < 0) + break; + } + + nread = getline(&line, &line_len, pin_file); + } while (++nr_read_elems < test->max_entries && nread > 0); + + if (lossless_map && + CHECK(nr_read_elems < test->max_entries, + "Unexpected EOF. nr_read_elems:%u test->max_entries:%u", + nr_read_elems, test->max_entries)) { + err = -1; + goto done; + } + + if (CHECK(nread > 0, "Unexpected extra pprint output: %s", line)) { + err = -1; + goto done; + } + + err = 0; + +done: + if (mapv) + free(mapv); + if (!err) + fprintf(stderr, "OK"); + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + if (btf_fd >= 0) + close(btf_fd); + if (map_fd >= 0) + close(map_fd); + if (pin_file) + fclose(pin_file); + unlink(pin_path); + free(line); +} + +static void test_pprint(void) +{ + unsigned int i; + + /* test various maps with the first test template */ + for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) { + pprint_test_template[0].descr = pprint_tests_meta[i].descr; + pprint_test_template[0].map_type = pprint_tests_meta[i].map_type; + pprint_test_template[0].map_name = pprint_tests_meta[i].map_name; + pprint_test_template[0].ordered_map = pprint_tests_meta[i].ordered_map; + pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map; + pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map; + + do_test_pprint(0); + } + + /* test rest test templates with the first map */ + for (i = 1; i < ARRAY_SIZE(pprint_test_template); i++) { + pprint_test_template[i].descr = pprint_tests_meta[0].descr; + pprint_test_template[i].map_type = pprint_tests_meta[0].map_type; + pprint_test_template[i].map_name = pprint_tests_meta[0].map_name; + pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map; + pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map; + pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map; + do_test_pprint(i); + } +} + +#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \ + (insn_off), (file_off), (line_off), ((line_num) << 10 | ((line_col) & 0x3ff)) + +static struct prog_info_raw_test { + const char *descr; + const char *str_sec; + const char *err_str; + __u32 raw_types[MAX_NR_RAW_U32]; + __u32 str_sec_size; + struct bpf_insn insns[MAX_INSNS]; + __u32 prog_type; + __u32 func_info[MAX_SUBPROGS][2]; + __u32 func_info_rec_size; + __u32 func_info_cnt; + __u32 line_info[MAX_NR_RAW_U32]; + __u32 line_info_rec_size; + __u32 nr_jited_ksyms; + bool expected_prog_load_failure; + __u32 dead_code_cnt; + __u32 dead_code_mask; + __u32 dead_func_cnt; + __u32 dead_func_mask; +} info_raw_tests[] = { +{ + .descr = "func_type (main func + one sub)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, 
BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */ + BTF_FUNC_PROTO_ENC(1, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ENC(1, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */ + BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB", + .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"), + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info = { {0, 5}, {3, 6} }, + .func_info_rec_size = 8, + .func_info_cnt = 2, + .line_info = { BTF_END_RAW }, +}, + +{ + .descr = "func_type (Incorrect func_info_rec_size)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */ + BTF_FUNC_PROTO_ENC(1, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ENC(1, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */ + BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB", + .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"), + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info = { {0, 5}, {3, 6} }, + .func_info_rec_size = 4, + .func_info_cnt = 2, + .line_info = { BTF_END_RAW }, + .expected_prog_load_failure = true, +}, + +{ + .descr = "func_type (Incorrect func_info_cnt)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */ + BTF_FUNC_PROTO_ENC(1, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ENC(1, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */ + BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB", + .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"), + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info = { {0, 5}, {3, 6} }, + .func_info_rec_size = 8, + .func_info_cnt = 1, + .line_info = { BTF_END_RAW }, + .expected_prog_load_failure = true, +}, + +{ + .descr = "func_type (Incorrect bpf_func_info.insn_off)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */ + BTF_FUNC_PROTO_ENC(1, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ENC(1, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */ + BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */ + BTF_END_RAW, + }, + .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB", + .str_sec_size = 
sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"), + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info = { {0, 5}, {2, 6} }, + .func_info_rec_size = 8, + .func_info_cnt = 2, + .line_info = { BTF_END_RAW }, + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (No subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, +}, + +{ + .descr = "line_info (No subprog. insn_off >= prog->len)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), + BPF_LINE_INFO_ENC(4, 0, 0, 5, 6), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .err_str = "line_info[4].insn_off", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (Zero bpf insn code)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), /* [2] */ + BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0unsigned long\0u64\0u64 a=1;\0return a;"), + .insns = { + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(1, 0, 0, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .err_str = "Invalid insn code at line_info[1]", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (No subprog. 
zero tailing line_info", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0, + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0, + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0, + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 0, + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32), + .nr_jited_ksyms = 1, +}, + +{ + .descr = "line_info (No subprog. nonzero tailing line_info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0, + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0, + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0, + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 1, + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32), + .nr_jited_ksyms = 1, + .err_str = "nonzero tailing record in line_info", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, +}, + +{ + .descr = "line_info (subprog + func_info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {5, 3} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, +}, + +{ + .descr = "line_info (subprog. 
missing 1st func line info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .err_str = "missing bpf_line_info for func#0", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (subprog. missing 2nd func line info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .err_str = "missing bpf_line_info for func#1", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (subprog. 
unordered insn offset)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .err_str = "Invalid line_info[2].insn_off", + .expected_prog_load_failure = true, +}, + +{ + .descr = "line_info (dead start)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 1, + .dead_code_mask = 0x01, +}, + +{ + .descr = "line_info (dead end)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 2, + .dead_code_mask = 0x28, +}, + +{ + .descr = "line_info (dead code + subprog + func_info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + 
BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {14, 3} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 9, + .dead_code_mask = 0x3fe, +}, + +{ + .descr = "line_info (dead subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */" + "\0return 0;\0return 0;\0/* dead */\0/* dead */" + "\0/* dead */\0return bla + 1;\0return bla + 1;" + "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {6, 3}, {9, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 3, + .dead_code_mask = 0x70, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead last subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */" + "\0return 0;\0/* dead */\0/* dead */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info 
= { {0, 4}, {5, 3} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 2, + .dead_code_mask = 0x18, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead subprog + dead start)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */" + "\0return 0;\0return 0;\0return 0;" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0return b + 1;\0return b + 1;\0return b + 1;"), + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {7, 3}, {10, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 5, + .dead_code_mask = 0x1e2, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead subprog + dead start w/ move)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */" + "\0return 0;\0return 0;\0/* dead */\0/* dead */" + "\0/* dead */\0return bla + 1;\0return bla + 1;" + "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {6, 3}, {9, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + 
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 3, + .dead_code_mask = 0x70, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead end + subprog start w/ no linfo)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3), + BPF_CALL_REL(3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 3}, {6, 4}, }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, +}, + +}; + +static size_t probe_prog_length(const struct bpf_insn *fp) +{ + size_t len; + + for (len = MAX_INSNS - 1; len > 0; --len) + if (fp[len].code != 0 || fp[len].imm != 0) + break; + return len + 1; +} + +static __u32 *patch_name_tbd(const __u32 *raw_u32, + const char *str, __u32 str_off, + unsigned int str_sec_size, + unsigned int *ret_size) +{ + int i, raw_u32_size = get_raw_sec_size(raw_u32); + const char *end_str = str + str_sec_size; + const char *next_str = str + str_off; + __u32 *new_u32 = NULL; + + if (raw_u32_size == -1) + return ERR_PTR(-EINVAL); + + if (!raw_u32_size) { + *ret_size = 0; + return NULL; + } + + new_u32 = malloc(raw_u32_size); + if (!new_u32) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < raw_u32_size / sizeof(raw_u32[0]); i++) { + if (raw_u32[i] == NAME_TBD) { + next_str = get_next_str(next_str, end_str); + if (CHECK(!next_str, "Error in getting next_str\n")) { + free(new_u32); + return ERR_PTR(-EINVAL); + } + new_u32[i] = next_str - str; + next_str += strlen(next_str); + } else { + new_u32[i] = raw_u32[i]; + } + } + + *ret_size = raw_u32_size; + return new_u32; +} + +static int test_get_finfo(const struct prog_info_raw_test *test, + int prog_fd) +{ + struct bpf_prog_info info = {}; + struct bpf_func_info *finfo; + __u32 info_len, rec_size, i; + void *func_info = NULL; + __u32 nr_func_info; + int err; + + /* get necessary lens */ + info_len = sizeof(struct bpf_prog_info); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (CHECK(err < 0, "invalid get info (1st) errno:%d", errno)) { + fprintf(stderr, "%s\n", btf_log_buf); + return -1; + } + nr_func_info = test->func_info_cnt - test->dead_func_cnt; + if (CHECK(info.nr_func_info != nr_func_info, + "incorrect info.nr_func_info (1st) %d", + info.nr_func_info)) { + return -1; + } + + rec_size = info.func_info_rec_size; + if (CHECK(rec_size != sizeof(struct bpf_func_info), + "incorrect info.func_info_rec_size (1st) %d", rec_size)) { + return -1; + } + + if (!info.nr_func_info) + return 0; + + func_info = malloc(info.nr_func_info * rec_size); + if (CHECK(!func_info, 
"out of memory")) + return -1; + + /* reset info to only retrieve func_info related data */ + memset(&info, 0, sizeof(info)); + info.nr_func_info = nr_func_info; + info.func_info_rec_size = rec_size; + info.func_info = ptr_to_u64(func_info); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (CHECK(err < 0, "invalid get info (2nd) errno:%d", errno)) { + fprintf(stderr, "%s\n", btf_log_buf); + err = -1; + goto done; + } + if (CHECK(info.nr_func_info != nr_func_info, + "incorrect info.nr_func_info (2nd) %d", + info.nr_func_info)) { + err = -1; + goto done; + } + if (CHECK(info.func_info_rec_size != rec_size, + "incorrect info.func_info_rec_size (2nd) %d", + info.func_info_rec_size)) { + err = -1; + goto done; + } + + finfo = func_info; + for (i = 0; i < nr_func_info; i++) { + if (test->dead_func_mask & (1 << i)) + continue; + if (CHECK(finfo->type_id != test->func_info[i][1], + "incorrect func_type %u expected %u", + finfo->type_id, test->func_info[i][1])) { + err = -1; + goto done; + } + finfo = (void *)finfo + rec_size; + } + + err = 0; + +done: + free(func_info); + return err; +} + +static int test_get_linfo(const struct prog_info_raw_test *test, + const void *patched_linfo, + __u32 cnt, int prog_fd) +{ + __u32 i, info_len, nr_jited_ksyms, nr_jited_func_lens; + __u64 *jited_linfo = NULL, *jited_ksyms = NULL; + __u32 rec_size, jited_rec_size, jited_cnt; + struct bpf_line_info *linfo = NULL; + __u32 cur_func_len, ksyms_found; + struct bpf_prog_info info = {}; + __u32 *jited_func_lens = NULL; + __u64 cur_func_ksyms; + __u32 dead_insns; + int err; + + jited_cnt = cnt; + rec_size = sizeof(*linfo); + jited_rec_size = sizeof(*jited_linfo); + if (test->nr_jited_ksyms) + nr_jited_ksyms = test->nr_jited_ksyms; + else + nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt; + nr_jited_func_lens = nr_jited_ksyms; + + info_len = sizeof(struct bpf_prog_info); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (CHECK(err < 0, "err:%d errno:%d", err, errno)) { + err = -1; + goto done; + } + + if (!info.jited_prog_len) { + /* prog is not jited */ + jited_cnt = 0; + nr_jited_ksyms = 1; + nr_jited_func_lens = 1; + } + + if (CHECK(info.nr_line_info != cnt || + info.nr_jited_line_info != jited_cnt || + info.nr_jited_ksyms != nr_jited_ksyms || + info.nr_jited_func_lens != nr_jited_func_lens || + (!info.nr_line_info && info.nr_jited_line_info), + "info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) nr_jited_ksyms:%u(expected:%u) nr_jited_func_lens:%u(expected:%u)", + info.nr_line_info, cnt, + info.nr_jited_line_info, jited_cnt, + info.nr_jited_ksyms, nr_jited_ksyms, + info.nr_jited_func_lens, nr_jited_func_lens)) { + err = -1; + goto done; + } + + if (CHECK(info.line_info_rec_size != sizeof(struct bpf_line_info) || + info.jited_line_info_rec_size != sizeof(__u64), + "info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)", + info.line_info_rec_size, rec_size, + info.jited_line_info_rec_size, jited_rec_size)) { + err = -1; + goto done; + } + + if (!cnt) + return 0; + + rec_size = info.line_info_rec_size; + jited_rec_size = info.jited_line_info_rec_size; + + memset(&info, 0, sizeof(info)); + + linfo = calloc(cnt, rec_size); + if (CHECK(!linfo, "!linfo")) { + err = -1; + goto done; + } + info.nr_line_info = cnt; + info.line_info_rec_size = rec_size; + info.line_info = ptr_to_u64(linfo); + + if (jited_cnt) { + jited_linfo = calloc(jited_cnt, jited_rec_size); + jited_ksyms = calloc(nr_jited_ksyms, sizeof(*jited_ksyms)); + 
jited_func_lens = calloc(nr_jited_func_lens, + sizeof(*jited_func_lens)); + if (CHECK(!jited_linfo || !jited_ksyms || !jited_func_lens, + "jited_linfo:%p jited_ksyms:%p jited_func_lens:%p", + jited_linfo, jited_ksyms, jited_func_lens)) { + err = -1; + goto done; + } + + info.nr_jited_line_info = jited_cnt; + info.jited_line_info_rec_size = jited_rec_size; + info.jited_line_info = ptr_to_u64(jited_linfo); + info.nr_jited_ksyms = nr_jited_ksyms; + info.jited_ksyms = ptr_to_u64(jited_ksyms); + info.nr_jited_func_lens = nr_jited_func_lens; + info.jited_func_lens = ptr_to_u64(jited_func_lens); + } + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + + /* + * Only recheck the info.*line_info* fields. + * Other fields are not the concern of this test. + */ + if (CHECK(err < 0 || + info.nr_line_info != cnt || + (jited_cnt && !info.jited_line_info) || + info.nr_jited_line_info != jited_cnt || + info.line_info_rec_size != rec_size || + info.jited_line_info_rec_size != jited_rec_size, + "err:%d errno:%d info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) line_info_rec_size:%u(expected:%u) jited_linfo_rec_size:%u(expected:%u) line_info:%p jited_line_info:%p", + err, errno, + info.nr_line_info, cnt, + info.nr_jited_line_info, jited_cnt, + info.line_info_rec_size, rec_size, + info.jited_line_info_rec_size, jited_rec_size, + (void *)(long)info.line_info, + (void *)(long)info.jited_line_info)) { + err = -1; + goto done; + } + + dead_insns = 0; + while (test->dead_code_mask & (1 << dead_insns)) + dead_insns++; + + CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u", + linfo[0].insn_off); + for (i = 1; i < cnt; i++) { + const struct bpf_line_info *expected_linfo; + + while (test->dead_code_mask & (1 << (i + dead_insns))) + dead_insns++; + + expected_linfo = patched_linfo + + ((i + dead_insns) * test->line_info_rec_size); + if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off, + "linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u", + i, linfo[i].insn_off, + i - 1, linfo[i - 1].insn_off)) { + err = -1; + goto done; + } + if (CHECK(linfo[i].file_name_off != expected_linfo->file_name_off || + linfo[i].line_off != expected_linfo->line_off || + linfo[i].line_col != expected_linfo->line_col, + "linfo[%u] (%u, %u, %u) != (%u, %u, %u)", i, + linfo[i].file_name_off, + linfo[i].line_off, + linfo[i].line_col, + expected_linfo->file_name_off, + expected_linfo->line_off, + expected_linfo->line_col)) { + err = -1; + goto done; + } + } + + if (!jited_cnt) { + fprintf(stderr, "not jited. skipping jited_line_info check. 
"); + err = 0; + goto done; + } + + if (CHECK(jited_linfo[0] != jited_ksyms[0], + "jited_linfo[0]:%lx != jited_ksyms[0]:%lx", + (long)(jited_linfo[0]), (long)(jited_ksyms[0]))) { + err = -1; + goto done; + } + + ksyms_found = 1; + cur_func_len = jited_func_lens[0]; + cur_func_ksyms = jited_ksyms[0]; + for (i = 1; i < jited_cnt; i++) { + if (ksyms_found < nr_jited_ksyms && + jited_linfo[i] == jited_ksyms[ksyms_found]) { + cur_func_ksyms = jited_ksyms[ksyms_found]; + cur_func_len = jited_ksyms[ksyms_found]; + ksyms_found++; + continue; + } + + if (CHECK(jited_linfo[i] <= jited_linfo[i - 1], + "jited_linfo[%u]:%lx <= jited_linfo[%u]:%lx", + i, (long)jited_linfo[i], + i - 1, (long)(jited_linfo[i - 1]))) { + err = -1; + goto done; + } + + if (CHECK(jited_linfo[i] - cur_func_ksyms > cur_func_len, + "jited_linfo[%u]:%lx - %lx > %u", + i, (long)jited_linfo[i], (long)cur_func_ksyms, + cur_func_len)) { + err = -1; + goto done; + } + } + + if (CHECK(ksyms_found != nr_jited_ksyms, + "ksyms_found:%u != nr_jited_ksyms:%u", + ksyms_found, nr_jited_ksyms)) { + err = -1; + goto done; + } + + err = 0; + +done: + free(linfo); + free(jited_linfo); + free(jited_ksyms); + free(jited_func_lens); + return err; +} + +static void do_test_info_raw(unsigned int test_num) +{ + const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1]; + unsigned int raw_btf_size, linfo_str_off, linfo_size = 0; + int btf_fd = -1, prog_fd = -1, err = 0; + void *raw_btf, *patched_linfo = NULL; + const char *ret_next_str; + union bpf_attr attr = {}; + + if (!test__start_subtest(test->descr)) + return; + + raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types, + test->str_sec, test->str_sec_size, + &raw_btf_size, &ret_next_str); + if (!raw_btf) + return; + + *btf_log_buf = '\0'; + btf_fd = load_raw_btf(raw_btf, raw_btf_size); + free(raw_btf); + + if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) { + err = -1; + goto done; + } + + if (*btf_log_buf && always_log) + fprintf(stderr, "\n%s", btf_log_buf); + *btf_log_buf = '\0'; + + linfo_str_off = ret_next_str - test->str_sec; + patched_linfo = patch_name_tbd(test->line_info, + test->str_sec, linfo_str_off, + test->str_sec_size, &linfo_size); + err = libbpf_get_error(patched_linfo); + if (err) { + fprintf(stderr, "error in creating raw bpf_line_info"); + err = -1; + goto done; + } + + attr.prog_type = test->prog_type; + attr.insns = ptr_to_u64(test->insns); + attr.insn_cnt = probe_prog_length(test->insns); + attr.license = ptr_to_u64("GPL"); + attr.prog_btf_fd = btf_fd; + attr.func_info_rec_size = test->func_info_rec_size; + attr.func_info_cnt = test->func_info_cnt; + attr.func_info = ptr_to_u64(test->func_info); + attr.log_buf = ptr_to_u64(btf_log_buf); + attr.log_size = BTF_LOG_BUF_SIZE; + attr.log_level = 1; + if (linfo_size) { + attr.line_info_rec_size = test->line_info_rec_size; + attr.line_info = ptr_to_u64(patched_linfo); + attr.line_info_cnt = linfo_size / attr.line_info_rec_size; + } + + prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); + err = ((prog_fd < 0) != test->expected_prog_load_failure); + if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d", + prog_fd, test->expected_prog_load_failure, errno) || + CHECK(test->err_str && !strstr(btf_log_buf, test->err_str), + "expected err_str:%s", test->err_str)) { + err = -1; + goto done; + } + + if (prog_fd < 0) + goto done; + + err = test_get_finfo(test, prog_fd); + if (err) + goto done; + + err = test_get_linfo(test, patched_linfo, + attr.line_info_cnt - test->dead_code_cnt, + prog_fd); + if 
(err) + goto done; + +done: + if (*btf_log_buf && (err || always_log)) + fprintf(stderr, "\n%s", btf_log_buf); + + if (btf_fd >= 0) + close(btf_fd); + if (prog_fd >= 0) + close(prog_fd); + + if (!libbpf_get_error(patched_linfo)) + free(patched_linfo); +} + +struct btf_raw_data { + __u32 raw_types[MAX_NR_RAW_U32]; + const char *str_sec; + __u32 str_sec_size; +}; + +struct btf_dedup_test { + const char *descr; + struct btf_raw_data input; + struct btf_raw_data expect; + struct btf_dedup_opts opts; +}; + +static struct btf_dedup_test dedup_tests[] = { + +{ + .descr = "dedup: unused strings filtering", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8), + BTF_END_RAW, + }, + BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0long"), + }, +}, +{ + .descr = "dedup: strings deduplication", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), + BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8), + BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0long int\0int\0long int\0int"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0long int"), + }, +}, +{ + .descr = "dedup: struct example #1", + /* + * struct s { + * struct s *next; + * const int *a; + * int b[16]; + * int c; + * } + */ + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* int[16] */ + BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */ + /* struct s { */ + BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */ + BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */ + BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */ + BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */ + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */ + /* ptr -> [3] struct s */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> [6] const int */ + BTF_PTR_ENC(6), /* [5] */ + /* const -> [1] int */ + BTF_CONST_ENC(1), /* [6] */ + /* tag -> [3] struct s */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + /* tag -> [3] struct s, member 1 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ + + /* full copy of the above */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */ + BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */ + BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */ + BTF_MEMBER_ENC(NAME_NTH(3), 12, 0), + BTF_MEMBER_ENC(NAME_NTH(4), 13, 64), + BTF_MEMBER_ENC(NAME_NTH(5), 10, 128), + BTF_MEMBER_ENC(NAME_NTH(6), 9, 640), + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), + BTF_PTR_ENC(11), /* [12] */ + BTF_PTR_ENC(14), /* [13] */ + BTF_CONST_ENC(9), /* [14] */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"), + }, + .expect = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* int[16] */ + BTF_TYPE_ARRAY_ENC(1, 1, 16), 
/* [2] */ + /* struct s { */ + BTF_STRUCT_ENC(NAME_NTH(8), 5, 88), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(7), 4, 0), /* struct s *next; */ + BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */ + BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */ + BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */ + /* ptr -> [3] struct s */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> [6] const int */ + BTF_PTR_ENC(6), /* [5] */ + /* const -> [1] int */ + BTF_CONST_ENC(1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"), + }, +}, +{ + .descr = "dedup: struct <-> fwd resolution w/ hash collision", + /* + * // CU 1: + * struct x; + * struct s { + * struct x *x; + * }; + * // CU 2: + * struct x {}; + * struct s { + * struct x *x; + * }; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */ + BTF_PTR_ENC(1), /* [2] ptr -> [1] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + /* CU 2 */ + BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */ + BTF_PTR_ENC(4), /* [5] ptr -> [4] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 5, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"), + }, + .expect = { + .raw_types = { + BTF_PTR_ENC(3), /* [1] ptr -> [3] */ + BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0x"), + }, + .opts = { + .force_collisions = true, /* force hash collisions */ + }, +}, +{ + .descr = "dedup: void equiv check", + /* + * // CU 1: + * struct s { + * struct {} *x; + * }; + * // CU 2: + * struct s { + * int *x; + * }; + */ + .input = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ + BTF_PTR_ENC(1), /* [2] ptr -> [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + /* CU 2 */ + BTF_PTR_ENC(0), /* [4] ptr -> void */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ + BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0x"), + }, + .expect = { + .raw_types = { + /* CU 1 */ + BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */ + BTF_PTR_ENC(1), /* [2] ptr -> [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */ + BTF_MEMBER_ENC(NAME_NTH(2), 2, 0), + /* CU 2 */ + BTF_PTR_ENC(0), /* [4] ptr -> void */ + BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */ + BTF_MEMBER_ENC(NAME_NTH(2), 4, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0s\0x"), + }, + .opts = { + .force_collisions = true, /* force hash collisions */ + }, +}, +{ + .descr = "dedup: all possible kinds (no duplicates)", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */ + BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */ + BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */ + BTF_PTR_ENC(0), /* [8] ptr */ + BTF_CONST_ENC(8), /* [9] const */ + 
BTF_VOLATILE_ENC(8), /* [10] volatile */ + BTF_RESTRICT_ENC(8), /* [11] restrict */ + BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18), + BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ + BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */ + BTF_ENUM64_ENC(NAME_TBD, 0, 0), + BTF_ENUM64_ENC(NAME_TBD, 1, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */ + BTF_ENUM_ENC(NAME_TBD, 0), + BTF_ENUM_ENC(NAME_TBD, 1), + BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */ + BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */ + BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */ + BTF_PTR_ENC(0), /* [8] ptr */ + BTF_CONST_ENC(8), /* [9] const */ + BTF_VOLATILE_ENC(8), /* [10] volatile */ + BTF_RESTRICT_ENC(8), /* [11] restrict */ + BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18), + BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ + BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */ + BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */ + BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */ + BTF_ENUM64_ENC(NAME_TBD, 0, 0), + BTF_ENUM64_ENC(NAME_TBD, 1, 1), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"), + }, +}, +{ + .descr = "dedup: no int/float duplicates", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8), + /* different name */ + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8), + /* different encoding */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8), + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8), + /* different bit offset */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8), + /* different bit size */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8), + /* different byte size */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), + /* all allowed sizes */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0some other int\0float"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8), + /* different name */ + BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8), + /* different encoding */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8), + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8), + /* different bit offset */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8), + /* 
different bit size */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8), + /* different byte size */ + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), + /* all allowed sizes */ + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 2), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 4), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 8), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 12), + BTF_TYPE_FLOAT_ENC(NAME_NTH(3), 16), + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0some other int\0float"), + }, +}, +{ + .descr = "dedup: enum fwd resolution", + .input = { + .raw_types = { + /* [1] fwd enum 'e1' before full enum */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), + /* [2] full enum 'e1' after fwd */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 123), + /* [3] full enum 'e2' before fwd */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(4), 456), + /* [4] fwd enum 'e2' after full enum */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4), + /* [5] incompatible fwd enum with different size */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), + /* [6] incompatible full enum with different value */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 321), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), + }, + .expect = { + .raw_types = { + /* [1] full enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 123), + /* [2] full enum 'e2' */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(4), 456), + /* [3] incompatible fwd enum with different size */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1), + /* [4] incompatible full enum with different value */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 321), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), + }, +}, +{ + .descr = "dedup: datasec and vars pass-through", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + /* int, referenced from [5] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */ + /* another static int t */ + BTF_VAR_ENC(NAME_NTH(2), 4, 0), /* [5] */ + /* another .bss section */ /* [6] */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(5, 0, 4), + BTF_END_RAW, + }, + BTF_STR_SEC("\0.bss\0t"), + }, + .expect = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */ + /* .bss section */ /* [3] */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + /* another static int t */ + BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [4] */ + /* another .bss section */ /* [5] */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(4, 0, 4), + BTF_END_RAW, + }, + BTF_STR_SEC("\0.bss\0t"), + }, + .opts = { + .force_collisions = true + }, +}, +{ + .descr = "dedup: func/func_arg/var tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* static int t */ + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ 
+ /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ + /* tag -> t */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */ + /* tag -> func */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */ + /* tag -> func arg a1 */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), + }, +}, +{ + .descr = "dedup: func/func_param tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ + /* void f(int a1, int a2) */ + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */ + /* tag -> f: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */ + /* tag -> f/a2: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */ + /* tag -> f: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */ + /* tag -> f/a2: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), + }, +}, +{ + .descr = "dedup: struct/struct_member tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + /* tag -> t: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ + /* tag -> t/m2: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + /* 
tag -> t: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */ + /* tag -> t/m2: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ + BTF_DECL_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), + }, +}, +{ + .descr = "dedup: typedef tags", + .input = { + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [3] */ + /* tag -> t: tag1, tag2 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [5] */ + /* tag -> t: tag1, tag3 */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 3, -1), /* [6] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 3, -1), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_DECL_TAG_ENC(NAME_NTH(2), 2, -1), /* [3] */ + BTF_DECL_TAG_ENC(NAME_NTH(3), 2, -1), /* [4] */ + BTF_DECL_TAG_ENC(NAME_NTH(4), 2, -1), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"), + }, +}, +{ + .descr = "dedup: btf_type_tag #1", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + /* ptr -> tag1 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */ + BTF_PTR_ENC(8), /* [9] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> int */ + BTF_PTR_ENC(2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #2", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_PTR_ENC(5), 
/* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #3", + .input = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, + .expect = { + .raw_types = { + /* ptr -> tag2 -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */ + BTF_PTR_ENC(3), /* [4] */ + /* ptr -> tag1 -> tag2 -> int */ + BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */ + BTF_PTR_ENC(6), /* [7] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0tag2"), + }, +}, +{ + .descr = "dedup: btf_type_tag #4", + .input = { + .raw_types = { + /* ptr -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + /* ptr -> tag1 -> long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + }, + .expect = { + .raw_types = { + /* ptr -> tag1 -> int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_PTR_ENC(2), /* [3] */ + /* ptr -> tag1 -> long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */ + BTF_PTR_ENC(5), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1"), + }, +}, +{ + .descr = "dedup: btf_type_tag #5, struct", + .input = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [4] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [5] */ + BTF_MEMBER_ENC(NAME_NTH(3), 4, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, + .expect = { + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */ + BTF_TYPE_ENC(NAME_NTH(2), BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 4), /* [3] */ + BTF_MEMBER_ENC(NAME_NTH(3), 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag1\0t\0m"), + }, +}, +{ + .descr = "dedup: enum64, standalone", + .input = { + .raw_types = { + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 123), + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 123), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, + .expect = { + .raw_types = { + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 123), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, +}, +{ + .descr = "dedup: enum64, fwd resolution", + .input = { + .raw_types = { + /* [1] fwd enum64 'e1' before full enum */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8), + /* [2] full enum64 'e1' after fwd */ + 
BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 123), + /* [3] full enum64 'e2' before fwd */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(4), 0, 456), + /* [4] fwd enum64 'e2' after full enum */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8), + /* [5] incompatible full enum64 with different value */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 0, 321), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), + }, + .expect = { + .raw_types = { + /* [1] full enum64 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 123), + /* [2] full enum64 'e2' */ + BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(4), 0, 456), + /* [3] incompatible full enum64 with different value */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8), + BTF_ENUM64_ENC(NAME_NTH(2), 0, 321), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"), + }, +}, +{ + .descr = "dedup: enum and enum64, no dedup", + .input = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] enum64 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, + .expect = { + .raw_types = { + /* [1] enum 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), + BTF_ENUM_ENC(NAME_NTH(2), 1), + /* [2] enum64 'e1' */ + BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4), + BTF_ENUM64_ENC(NAME_NTH(2), 1, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0e1\0e1_val"), + }, +}, + +}; + +static int btf_type_size(const struct btf_type *t) +{ + int base_size = sizeof(struct btf_type); + __u16 vlen = BTF_INFO_VLEN(t->info); + __u16 kind = BTF_INFO_KIND(t->info); + + switch (kind) { + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_FLOAT: + case BTF_KIND_TYPE_TAG: + return base_size; + case BTF_KIND_INT: + return base_size + sizeof(__u32); + case BTF_KIND_ENUM: + return base_size + vlen * sizeof(struct btf_enum); + case BTF_KIND_ENUM64: + return base_size + vlen * sizeof(struct btf_enum64); + case BTF_KIND_ARRAY: + return base_size + sizeof(struct btf_array); + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + return base_size + vlen * sizeof(struct btf_member); + case BTF_KIND_FUNC_PROTO: + return base_size + vlen * sizeof(struct btf_param); + case BTF_KIND_VAR: + return base_size + sizeof(struct btf_var); + case BTF_KIND_DATASEC: + return base_size + vlen * sizeof(struct btf_var_secinfo); + case BTF_KIND_DECL_TAG: + return base_size + sizeof(struct btf_decl_tag); + default: + fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind); + return -EINVAL; + } +} + +static void dump_btf_strings(const char *strs, __u32 len) +{ + const char *cur = strs; + int i = 0; + + while (cur < strs + len) { + fprintf(stderr, "string #%d: '%s'\n", i, cur); + cur += strlen(cur) + 1; + i++; + } +} + +static void do_test_dedup(unsigned int test_num) +{ + struct btf_dedup_test *test = &dedup_tests[test_num - 1]; + __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size; + const struct btf_header *test_hdr, *expect_hdr; + 
struct btf *test_btf = NULL, *expect_btf = NULL; + const void *test_btf_data, *expect_btf_data; + const char *ret_test_next_str, *ret_expect_next_str; + const char *test_strs, *expect_strs; + const char *test_str_cur; + const char *expect_str_cur, *expect_str_end; + unsigned int raw_btf_size; + void *raw_btf; + int err = 0, i; + + if (!test__start_subtest(test->descr)) + return; + + raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types, + test->input.str_sec, test->input.str_sec_size, + &raw_btf_size, &ret_test_next_str); + if (!raw_btf) + return; + + test_btf = btf__new((__u8 *)raw_btf, raw_btf_size); + err = libbpf_get_error(test_btf); + free(raw_btf); + if (CHECK(err, "invalid test_btf errno:%d", err)) { + err = -1; + goto done; + } + + raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types, + test->expect.str_sec, + test->expect.str_sec_size, + &raw_btf_size, &ret_expect_next_str); + if (!raw_btf) + return; + expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size); + err = libbpf_get_error(expect_btf); + free(raw_btf); + if (CHECK(err, "invalid expect_btf errno:%d", err)) { + err = -1; + goto done; + } + + test->opts.sz = sizeof(test->opts); + err = btf__dedup(test_btf, &test->opts); + if (CHECK(err, "btf_dedup failed errno:%d", err)) { + err = -1; + goto done; + } + + test_btf_data = btf__raw_data(test_btf, &test_btf_size); + expect_btf_data = btf__raw_data(expect_btf, &expect_btf_size); + if (CHECK(test_btf_size != expect_btf_size, + "test_btf_size:%u != expect_btf_size:%u", + test_btf_size, expect_btf_size)) { + err = -1; + goto done; + } + + test_hdr = test_btf_data; + test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off; + expect_hdr = expect_btf_data; + expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off; + if (CHECK(test_hdr->str_len != expect_hdr->str_len, + "test_hdr->str_len:%u != expect_hdr->str_len:%u", + test_hdr->str_len, expect_hdr->str_len)) { + fprintf(stderr, "\ntest strings:\n"); + dump_btf_strings(test_strs, test_hdr->str_len); + fprintf(stderr, "\nexpected strings:\n"); + dump_btf_strings(expect_strs, expect_hdr->str_len); + err = -1; + goto done; + } + + expect_str_cur = expect_strs; + expect_str_end = expect_strs + expect_hdr->str_len; + while (expect_str_cur < expect_str_end) { + size_t test_len, expect_len; + int off; + + off = btf__find_str(test_btf, expect_str_cur); + if (CHECK(off < 0, "exp str '%s' not found: %d\n", expect_str_cur, off)) { + err = -1; + goto done; + } + test_str_cur = btf__str_by_offset(test_btf, off); + + test_len = strlen(test_str_cur); + expect_len = strlen(expect_str_cur); + if (CHECK(test_len != expect_len, + "test_len:%zu != expect_len:%zu " + "(test_str:%s, expect_str:%s)", + test_len, expect_len, test_str_cur, expect_str_cur)) { + err = -1; + goto done; + } + if (CHECK(strcmp(test_str_cur, expect_str_cur), + "test_str:%s != expect_str:%s", + test_str_cur, expect_str_cur)) { + err = -1; + goto done; + } + expect_str_cur += expect_len + 1; + } + + test_nr_types = btf__type_cnt(test_btf); + expect_nr_types = btf__type_cnt(expect_btf); + if (CHECK(test_nr_types != expect_nr_types, + "test_nr_types:%u != expect_nr_types:%u", + test_nr_types, expect_nr_types)) { + err = -1; + goto done; + } + + for (i = 1; i < test_nr_types; i++) { + const struct btf_type *test_type, *expect_type; + int test_size, expect_size; + + test_type = btf__type_by_id(test_btf, i); + expect_type = btf__type_by_id(expect_btf, i); + test_size = btf_type_size(test_type); + expect_size = btf_type_size(expect_type); + + if 
(CHECK(test_size != expect_size, + "type #%d: test_size:%d != expect_size:%u", + i, test_size, expect_size)) { + err = -1; + goto done; + } + if (CHECK(btf_kind(test_type) != btf_kind(expect_type), + "type %d kind: exp %d != got %u\n", + i, btf_kind(expect_type), btf_kind(test_type))) { + err = -1; + goto done; + } + if (CHECK(test_type->info != expect_type->info, + "type %d info: exp %d != got %u\n", + i, expect_type->info, test_type->info)) { + err = -1; + goto done; + } + if (CHECK(test_type->size != expect_type->size, + "type %d size/type: exp %d != got %u\n", + i, expect_type->size, test_type->size)) { + err = -1; + goto done; + } + } + +done: + btf__free(test_btf); + btf__free(expect_btf); +} + +void test_btf(void) +{ + int i; + + always_log = env.verbosity > VERBOSE_NONE; + + for (i = 1; i <= ARRAY_SIZE(raw_tests); i++) + do_test_raw(i); + for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++) + do_test_get_info(i); + for (i = 1; i <= ARRAY_SIZE(file_tests); i++) + do_test_file(i); + for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++) + do_test_info_raw(i); + for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++) + do_test_dedup(i); + test_pprint(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c new file mode 100644 index 000000000..90aac4375 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include <bpf/btf.h> +#include "btf_helpers.h" + +static void test_split_simple() { + const struct btf_type *t; + struct btf *btf1, *btf2; + int str_off, err; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 1); /* [2] ptr to int */ + btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + ASSERT_STREQ(btf_type_c_dump(btf1), "\ +struct s1 {\n\ + int f1;\n\ +};\n\n", "c_dump"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + /* pointer size should be "inherited" from main BTF */ + ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz"); + + str_off = btf__find_str(btf2, "int"); + ASSERT_NEQ(str_off, -ENOENT, "str_int_missing"); + + t = btf__type_by_id(btf2, 1); + if (!ASSERT_OK_PTR(t, "int_type")) + goto cleanup; + ASSERT_EQ(btf_is_int(t), true, "int_kind"); + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name"); + + btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */ + btf__add_field(btf2, "f1", 6, 0, 0); /* struct s1 f1; */ + btf__add_field(btf2, "f2", 5, 32, 0); /* int f2; */ + btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */ + /* } */ + + /* duplicated int */ + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [5] int */ + + /* duplicated struct s1 */ + btf__add_struct(btf2, "s1", 4); /* [6] struct s1 { */ + btf__add_field(btf2, "f1", 5, 0, 0); /* int f1; */ + /* } */ + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 
bits_offset=0", + "[4] STRUCT 's2' size=16 vlen=3\n" + "\t'f1' type_id=6 bits_offset=0\n" + "\t'f2' type_id=5 bits_offset=32\n" + "\t'f3' type_id=2 bits_offset=64", + "[5] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[6] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=5 bits_offset=0"); + + ASSERT_STREQ(btf_type_c_dump(btf2), "\ +struct s1 {\n\ + int f1;\n\ +};\n\ +\n\ +struct s1___2 {\n\ + int f1;\n\ +};\n\ +\n\ +struct s2 {\n\ + struct s1___2 f1;\n\ + int f2;\n\ + int *f3;\n\ +};\n\n", "c_dump"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=16 vlen=3\n" + "\t'f1' type_id=3 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32\n" + "\t'f3' type_id=2 bits_offset=64"); + + ASSERT_STREQ(btf_type_c_dump(btf2), "\ +struct s1 {\n\ + int f1;\n\ +};\n\ +\n\ +struct s2 {\n\ + struct s1 f1;\n\ + int f2;\n\ + int *f3;\n\ +};\n\n", "c_dump"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + +static void test_split_fwd_resolve() { + struct btf *btf1, *btf2; + int err; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 4); /* [2] ptr to struct s1 */ + btf__add_ptr(btf1, 5); /* [3] ptr to struct s2 */ + btf__add_struct(btf1, "s1", 16); /* [4] struct s1 { */ + btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf1, "f2", 3, 64, 0); /* struct s2 *f2; */ + /* } */ + btf__add_struct(btf1, "s2", 4); /* [5] struct s2 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=4", + "[3] PTR '(anon)' type_id=5", + "[4] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=64", + "[5] STRUCT 's2' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */ + btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */ + btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */ + btf__add_ptr(btf2, 8); /* [9] ptr to fwd struct s2 */ + btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */ + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ + /* } */ + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=4", + "[3] PTR '(anon)' type_id=5", + "[4] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=64", + "[5] STRUCT 's2' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[7] PTR '(anon)' type_id=10", + "[8] FWD 's2' fwd_kind=struct", + "[9] PTR '(anon)' type_id=8", + "[10] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=7 bits_offset=0\n" + "\t'f2' type_id=9 bits_offset=64"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 
encoding=SIGNED", + "[2] PTR '(anon)' type_id=4", + "[3] PTR '(anon)' type_id=5", + "[4] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=64", + "[5] STRUCT 's2' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + +static void test_split_struct_duped() { + struct btf *btf1, *btf2; + int err; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 5); /* [2] ptr to struct s1 */ + btf__add_fwd(btf1, "s2", BTF_FWD_STRUCT); /* [3] fwd for struct s2 */ + btf__add_ptr(btf1, 3); /* [4] ptr to fwd struct s2 */ + btf__add_struct(btf1, "s1", 16); /* [5] struct s1 { */ + btf__add_field(btf1, "f1", 2, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf1, "f2", 4, 64, 0); /* struct s2 *f2; */ + /* } */ + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=5", + "[3] FWD 's2' fwd_kind=struct", + "[4] PTR '(anon)' type_id=3", + "[5] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=64"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_int(btf2, "int", 4, BTF_INT_SIGNED); /* [6] int */ + btf__add_ptr(btf2, 10); /* [7] ptr to struct s1 */ + btf__add_fwd(btf2, "s2", BTF_FWD_STRUCT); /* [8] fwd for struct s2 */ + btf__add_ptr(btf2, 11); /* [9] ptr to struct s2 */ + btf__add_struct(btf2, "s1", 16); /* [10] struct s1 { */ + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ + /* } */ + btf__add_struct(btf2, "s2", 40); /* [11] struct s2 { */ + btf__add_field(btf2, "f1", 7, 0, 0); /* struct s1 *f1; */ + btf__add_field(btf2, "f2", 9, 64, 0); /* struct s2 *f2; */ + btf__add_field(btf2, "f3", 6, 128, 0); /* int f3; */ + btf__add_field(btf2, "f4", 10, 192, 0); /* struct s1 f4; */ + /* } */ + btf__add_ptr(btf2, 8); /* [12] ptr to fwd struct s2 */ + btf__add_struct(btf2, "s3", 8); /* [13] struct s3 { */ + btf__add_field(btf2, "f1", 12, 0, 0); /* struct s2 *f1; (fwd) */ + /* } */ + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=5", + "[3] FWD 's2' fwd_kind=struct", + "[4] PTR '(anon)' type_id=3", + "[5] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=64", + "[6] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[7] PTR '(anon)' type_id=10", + "[8] FWD 's2' fwd_kind=struct", + "[9] PTR '(anon)' type_id=11", + "[10] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=7 bits_offset=0\n" + "\t'f2' type_id=9 bits_offset=64", + "[11] STRUCT 's2' size=40 vlen=4\n" + "\t'f1' type_id=7 bits_offset=0\n" + "\t'f2' type_id=9 bits_offset=64\n" + "\t'f3' type_id=6 bits_offset=128\n" + "\t'f4' type_id=10 bits_offset=192", + "[12] PTR '(anon)' type_id=8", + "[13] STRUCT 's3' size=8 vlen=1\n" + "\t'f1' type_id=12 bits_offset=0"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=5", + "[3] FWD 's2' fwd_kind=struct", + "[4] PTR '(anon)' type_id=3", + "[5] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=2 bits_offset=0\n" 
+ "\t'f2' type_id=4 bits_offset=64", + "[6] PTR '(anon)' type_id=8", + "[7] PTR '(anon)' type_id=9", + "[8] STRUCT 's1' size=16 vlen=2\n" + "\t'f1' type_id=6 bits_offset=0\n" + "\t'f2' type_id=7 bits_offset=64", + "[9] STRUCT 's2' size=40 vlen=4\n" + "\t'f1' type_id=6 bits_offset=0\n" + "\t'f2' type_id=7 bits_offset=64\n" + "\t'f3' type_id=1 bits_offset=128\n" + "\t'f4' type_id=8 bits_offset=192", + "[10] STRUCT 's3' size=8 vlen=1\n" + "\t'f1' type_id=7 bits_offset=0"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + +static void btf_add_dup_struct_in_cu(struct btf *btf, int start_id) +{ +#define ID(n) (start_id + n) + btf__set_pointer_size(btf, 8); /* enforce 64-bit arch */ + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + + btf__add_struct(btf, "s", 8); /* [2] struct s { */ + btf__add_field(btf, "a", ID(3), 0, 0); /* struct anon a; */ + btf__add_field(btf, "b", ID(4), 0, 0); /* struct anon b; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [3] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ + + btf__add_struct(btf, "(anon)", 8); /* [4] struct anon { */ + btf__add_field(btf, "f1", ID(1), 0, 0); /* int f1; */ + btf__add_field(btf, "f2", ID(1), 32, 0); /* int f2; */ + /* } */ +#undef ID +} + +static void test_split_dup_struct_in_cu() +{ + struct btf *btf1, *btf2 = NULL; + int err; + + /* generate the base data.. */ + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf_add_dup_struct_in_cu(btf1, 0); + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=4 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* ..dedup them... 
*/ + err = btf__dedup(btf1, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + + /* and add the same data on top of it */ + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf_add_dup_struct_in_cu(btf2, 3); + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32", + "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[5] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=6 bits_offset=0\n" + "\t'b' type_id=7 bits_offset=0", + "[6] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32", + "[7] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=4 bits_offset=0\n" + "\t'f2' type_id=4 bits_offset=32"); + + err = btf__dedup(btf2, NULL); + if (!ASSERT_OK(err, "btf_dedup")) + goto cleanup; + + /* after dedup it should match the original data */ + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's' size=8 vlen=2\n" + "\t'a' type_id=3 bits_offset=0\n" + "\t'b' type_id=3 bits_offset=0", + "[3] STRUCT '(anon)' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32"); + +cleanup: + btf__free(btf2); + btf__free(btf1); +} + +void test_btf_dedup_split() +{ + if (test__start_subtest("split_simple")) + test_split_simple(); + if (test__start_subtest("split_struct_duped")) + test_split_struct_duped(); + if (test__start_subtest("split_fwd_resolve")) + test_split_fwd_resolve(); + if (test__start_subtest("split_dup_struct_in_cu")) + test_split_dup_struct_in_cu(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c new file mode 100644 index 000000000..24da33548 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -0,0 +1,905 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/btf.h> + +static int duration = 0; + +void btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vfprintf(ctx, fmt, args); +} + +static struct btf_dump_test_case { + const char *name; + const char *file; + bool known_ptr_sz; +} btf_dump_test_cases[] = { + {"btf_dump: syntax", "btf_dump_test_case_syntax", true}, + {"btf_dump: ordering", "btf_dump_test_case_ordering", false}, + {"btf_dump: padding", "btf_dump_test_case_padding", true}, + {"btf_dump: packing", "btf_dump_test_case_packing", true}, + {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true}, + {"btf_dump: multidim", "btf_dump_test_case_multidim", false}, + {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false}, +}; + +static int btf_dump_all_types(const struct btf *btf, void *ctx) +{ + size_t type_cnt = btf__type_cnt(btf); + struct btf_dump *d; + int err = 0, id; + + d = btf_dump__new(btf, btf_dump_printf, ctx, NULL); + err = libbpf_get_error(d); + if (err) + return err; + + for (id = 1; id < type_cnt; id++) { + err = btf_dump__dump_type(d, id); + if (err) + 
goto done; + } + +done: + btf_dump__free(d); + return err; +} + +static int test_btf_dump_case(int n, struct btf_dump_test_case *t) +{ + char test_file[256], out_file[256], diff_cmd[1024]; + struct btf *btf = NULL; + int err = 0, fd = -1; + FILE *f = NULL; + + snprintf(test_file, sizeof(test_file), "%s.bpf.o", t->file); + + btf = btf__parse_elf(test_file, NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse_elf")) { + err = -PTR_ERR(btf); + btf = NULL; + goto done; + } + + /* tests with t->known_ptr_sz have no "long" or "unsigned long" type, + * so it's impossible to determine correct pointer size; but if they + * do, it should be 8 regardless of host architecture, becaues BPF + * target is always 64-bit + */ + if (!t->known_ptr_sz) { + btf__set_pointer_size(btf, 8); + } else { + CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n", + 8, btf__pointer_size(btf)); + } + + snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file); + fd = mkstemp(out_file); + if (!ASSERT_GE(fd, 0, "create_tmp")) { + err = fd; + goto done; + } + f = fdopen(fd, "w"); + if (CHECK(f == NULL, "open_tmp", "failed to open file: %s(%d)\n", + strerror(errno), errno)) { + close(fd); + goto done; + } + + err = btf_dump_all_types(btf, f); + fclose(f); + close(fd); + if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) { + goto done; + } + + snprintf(test_file, sizeof(test_file), "progs/%s.c", t->file); + if (access(test_file, R_OK) == -1) + /* + * When the test is run with O=, kselftest copies TEST_FILES + * without preserving the directory structure. + */ + snprintf(test_file, sizeof(test_file), "%s.c", t->file); + /* + * Diff test output and expected test output, contained between + * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. + * For expected output lines, everything before '*' is stripped out. + * Also lines containing comment start and comment end markers are + * ignored. 
+ */ + snprintf(diff_cmd, sizeof(diff_cmd), + "awk '/START-EXPECTED-OUTPUT/{out=1;next} " + "/END-EXPECTED-OUTPUT/{out=0} " + "/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */ + "out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'", + test_file, out_file); + err = system(diff_cmd); + if (CHECK(err, "diff", + "differing test output, output=%s, err=%d, diff cmd:\n%s\n", + out_file, err, diff_cmd)) + goto done; + + remove(out_file); + +done: + btf__free(btf); + return err; +} + +static char *dump_buf; +static size_t dump_buf_sz; +static FILE *dump_buf_file; + +static void test_btf_dump_incremental(void) +{ + struct btf *btf = NULL; + struct btf_dump *d = NULL; + int id, err, i; + + dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); + if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) + return; + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "new_empty")) + goto err_out; + d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL); + if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new")) + goto err_out; + + /* First, generate BTF corresponding to the following C code: + * + * enum x; + * + * enum x { X = 1 }; + * + * enum { Y = 1 }; + * + * struct s; + * + * struct s { int x; }; + * + */ + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 1, "enum_declaration_id"); + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 2, "named_enum_id"); + err = btf__add_enum_value(btf, "X", 1); + ASSERT_OK(err, "named_enum_val_ok"); + + id = btf__add_enum(btf, NULL, 4); + ASSERT_EQ(id, 3, "anon_enum_id"); + err = btf__add_enum_value(btf, "Y", 1); + ASSERT_OK(err, "anon_enum_val_ok"); + + id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); + ASSERT_EQ(id, 4, "int_id"); + + id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT); + ASSERT_EQ(id, 5, "fwd_id"); + + id = btf__add_struct(btf, "s", 4); + ASSERT_EQ(id, 6, "struct_id"); + err = btf__add_field(btf, "x", 4, 0, 0); + ASSERT_OK(err, "field_ok"); + + for (i = 1; i < btf__type_cnt(btf); i++) { + err = btf_dump__dump_type(d, i); + ASSERT_OK(err, "dump_type_ok"); + } + + fflush(dump_buf_file); + dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ + + ASSERT_STREQ(dump_buf, +"enum x;\n" +"\n" +"enum x {\n" +" X = 1,\n" +"};\n" +"\n" +"enum {\n" +" Y = 1,\n" +"};\n" +"\n" +"struct s;\n" +"\n" +"struct s {\n" +" int x;\n" +"};\n\n", "c_dump1"); + + /* Now, after dumping original BTF, append another struct that embeds + * anonymous enum. It also has a name conflict with the first struct: + * + * struct s___2 { + * enum { VAL___2 = 1 } x; + * struct s s; + * }; + * + * This will test that btf_dump'er maintains internal state properly. + * Note that VAL___2 enum value. 
It's because we've already emitted + * that enum as a global anonymous enum, so btf_dump will ensure that + * enum values don't conflict; + * + */ + fseek(dump_buf_file, 0, SEEK_SET); + + id = btf__add_struct(btf, "s", 4); + ASSERT_EQ(id, 7, "struct_id"); + err = btf__add_field(btf, "x", 2, 0, 0); + ASSERT_OK(err, "field_ok"); + err = btf__add_field(btf, "y", 3, 32, 0); + ASSERT_OK(err, "field_ok"); + err = btf__add_field(btf, "s", 6, 64, 0); + ASSERT_OK(err, "field_ok"); + + for (i = 1; i < btf__type_cnt(btf); i++) { + err = btf_dump__dump_type(d, i); + ASSERT_OK(err, "dump_type_ok"); + } + + fflush(dump_buf_file); + dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ + ASSERT_STREQ(dump_buf, +"struct s___2 {\n" +" enum x x;\n" +" enum {\n" +" Y___2 = 1,\n" +" } y;\n" +" struct s s;\n" +"};\n\n" , "c_dump1"); + +err_out: + fclose(dump_buf_file); + free(dump_buf); + btf_dump__free(d); + btf__free(btf); +} + +#define STRSIZE 4096 + +static void btf_dump_snprintf(void *ctx, const char *fmt, va_list args) +{ + char *s = ctx, new[STRSIZE]; + + vsnprintf(new, STRSIZE, fmt, args); + if (strlen(s) < STRSIZE) + strncat(s, new, STRSIZE - strlen(s) - 1); +} + +static int btf_dump_data(struct btf *btf, struct btf_dump *d, + char *name, char *prefix, __u64 flags, void *ptr, + size_t ptr_sz, char *str, const char *expected_val) +{ + DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); + size_t type_sz; + __s32 type_id; + int ret = 0; + + if (flags & BTF_F_COMPACT) + opts.compact = true; + if (flags & BTF_F_NONAME) + opts.skip_names = true; + if (flags & BTF_F_ZERO) + opts.emit_zeroes = true; + if (prefix) { + ASSERT_STRNEQ(name, prefix, strlen(prefix), + "verify prefix match"); + name += strlen(prefix) + 1; + } + type_id = btf__find_by_name(btf, name); + if (!ASSERT_GE(type_id, 0, "find type id")) + return -ENOENT; + type_sz = btf__resolve_size(btf, type_id); + str[0] = '\0'; + ret = btf_dump__dump_type_data(d, type_id, ptr, ptr_sz, &opts); + if (type_sz <= ptr_sz) { + if (!ASSERT_EQ(ret, type_sz, "failed/unexpected type_sz")) + return -EINVAL; + } else { + if (!ASSERT_EQ(ret, -E2BIG, "failed to return -E2BIG")) + return -EINVAL; + } + if (!ASSERT_STREQ(str, expected_val, "ensure expected/actual match")) + return -EFAULT; + return 0; +} + +#define TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \ + _expected, ...) \ + do { \ + char __ptrtype[64] = #_type; \ + char *_ptrtype = (char *)__ptrtype; \ + _type _ptrdata = __VA_ARGS__; \ + void *_ptr = &_ptrdata; \ + \ + (void) btf_dump_data(_b, _d, _ptrtype, _prefix, _flags, \ + _ptr, sizeof(_type), _str, \ + _expected); \ + } while (0) + +/* Use where expected data string matches its stringified declaration */ +#define TEST_BTF_DUMP_DATA_C(_b, _d, _prefix, _str, _type, _flags, \ + ...) \ + TEST_BTF_DUMP_DATA(_b, _d, _prefix, _str, _type, _flags, \ + "(" #_type ")" #__VA_ARGS__, __VA_ARGS__) + +/* overflow test; pass typesize < expected type size, ensure E2BIG returned */ +#define TEST_BTF_DUMP_DATA_OVER(_b, _d, _prefix, _str, _type, _type_sz, \ + _expected, ...) \ + do { \ + char __ptrtype[64] = #_type; \ + char *_ptrtype = (char *)__ptrtype; \ + _type _ptrdata = __VA_ARGS__; \ + void *_ptr = &_ptrdata; \ + \ + (void) btf_dump_data(_b, _d, _ptrtype, _prefix, 0, \ + _ptr, _type_sz, _str, _expected); \ + } while (0) + +#define TEST_BTF_DUMP_VAR(_b, _d, _prefix, _str, _var, _type, _flags, \ + _expected, ...) 
\ + do { \ + _type _ptrdata = __VA_ARGS__; \ + void *_ptr = &_ptrdata; \ + \ + (void) btf_dump_data(_b, _d, _var, _prefix, _flags, \ + _ptr, sizeof(_type), _str, \ + _expected); \ + } while (0) + +static void test_btf_dump_int_data(struct btf *btf, struct btf_dump *d, + char *str) +{ +#ifdef __SIZEOF_INT128__ + unsigned __int128 i = 0xffffffffffffffff; + + /* this dance is required because we cannot directly initialize + * a 128-bit value to anything larger than a 64-bit value. + */ + i = (i << 64) | (i - 1); +#endif + /* simple int */ + TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, 1234); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, + "1234", 1234); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)1234", 1234); + + /* zero value should be printed at toplevel */ + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT, "(int)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, + "0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_ZERO, + "(int)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "0", 0); + TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, int, BTF_F_COMPACT, -4567); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, BTF_F_COMPACT | BTF_F_NONAME, + "-4567", -4567); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, int, 0, "(int)-4567", -4567); + + TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, int, sizeof(int)-1, "", 1); + +#ifdef __SIZEOF_INT128__ + /* gcc encode unsigned __int128 type with name "__int128 unsigned" in dwarf, + * and clang encode it with name "unsigned __int128" in dwarf. + * Do an availability test for either variant before doing actual test. + */ + if (btf__find_by_name(btf, "unsigned __int128") > 0) { + TEST_BTF_DUMP_DATA(btf, d, NULL, str, unsigned __int128, BTF_F_COMPACT, + "(unsigned __int128)0xffffffffffffffff", + 0xffffffffffffffff); + ASSERT_OK(btf_dump_data(btf, d, "unsigned __int128", NULL, 0, &i, 16, str, + "(unsigned __int128)0xfffffffffffffffffffffffffffffffe"), + "dump unsigned __int128"); + } else if (btf__find_by_name(btf, "__int128 unsigned") > 0) { + TEST_BTF_DUMP_DATA(btf, d, NULL, str, __int128 unsigned, BTF_F_COMPACT, + "(__int128 unsigned)0xffffffffffffffff", + 0xffffffffffffffff); + ASSERT_OK(btf_dump_data(btf, d, "__int128 unsigned", NULL, 0, &i, 16, str, + "(__int128 unsigned)0xfffffffffffffffffffffffffffffffe"), + "dump unsigned __int128"); + } else { + ASSERT_TRUE(false, "unsigned_int128_not_found"); + } +#endif +} + +static void test_btf_dump_float_data(struct btf *btf, struct btf_dump *d, + char *str) +{ + float t1 = 1.234567; + float t2 = -1.234567; + float t3 = 0.0; + double t4 = 5.678912; + double t5 = -5.678912; + double t6 = 0.0; + long double t7 = 9.876543; + long double t8 = -9.876543; + long double t9 = 0.0; + + /* since the kernel does not likely have any float types in its BTF, we + * will need to add some of various sizes. 
+ */ + + ASSERT_GT(btf__add_float(btf, "test_float", 4), 0, "add float"); + ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t1, 4, str, + "(test_float)1.234567"), "dump float"); + ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t2, 4, str, + "(test_float)-1.234567"), "dump float"); + ASSERT_OK(btf_dump_data(btf, d, "test_float", NULL, 0, &t3, 4, str, + "(test_float)0.000000"), "dump float"); + + ASSERT_GT(btf__add_float(btf, "test_double", 8), 0, "add_double"); + ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t4, 8, str, + "(test_double)5.678912"), "dump double"); + ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t5, 8, str, + "(test_double)-5.678912"), "dump double"); + ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t6, 8, str, + "(test_double)0.000000"), "dump double"); + + ASSERT_GT(btf__add_float(btf, "test_long_double", 16), 0, "add long double"); + ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t7, 16, + str, "(test_long_double)9.876543"), + "dump long_double"); + ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t8, 16, + str, "(test_long_double)-9.876543"), + "dump long_double"); + ASSERT_OK(btf_dump_data(btf, d, "test_long_double", NULL, 0, &t9, 16, + str, "(test_long_double)0.000000"), + "dump long_double"); +} + +static void test_btf_dump_char_data(struct btf *btf, struct btf_dump *d, + char *str) +{ + /* simple char */ + TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, char, BTF_F_COMPACT, 100); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME, + "100", 100); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)100", 100); + /* zero value should be printed at toplevel */ + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT, + "(char)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME, + "0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_ZERO, + "(char)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, char, 0, "(char)0", 0); + + TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, char, sizeof(char)-1, "", 100); +} + +static void test_btf_dump_typedef_data(struct btf *btf, struct btf_dump *d, + char *str) +{ + /* simple typedef */ + TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, uint64_t, BTF_F_COMPACT, 100); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME, + "1", 1); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)1", 1); + /* zero value should be printed at toplevel */ + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT, "(u64)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_NONAME, + "0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, BTF_F_COMPACT | BTF_F_ZERO, + "(u64)0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "0", 0); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, u64, 0, "(u64)0", 0); + + /* typedef struct */ + TEST_BTF_DUMP_DATA_C(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, + {.counter = (int)1,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_NONAME, + "{1,}", { .counter = 1 }); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0, +"(atomic_t){\n" +" .counter = (int)1,\n" +"}", + {.counter = 1,}); + /* typedef with 0 value should be printed at toplevel */ + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT, "(atomic_t){}", + {.counter = 0,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, 
atomic_t, BTF_F_COMPACT | BTF_F_NONAME, + "{}", {.counter = 0,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, 0, +"(atomic_t){\n" +"}", + {.counter = 0,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_COMPACT | BTF_F_ZERO, + "(atomic_t){.counter = (int)0,}", + {.counter = 0,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "{0,}", {.counter = 0,}); + TEST_BTF_DUMP_DATA(btf, d, NULL, str, atomic_t, BTF_F_ZERO, +"(atomic_t){\n" +" .counter = (int)0,\n" +"}", + { .counter = 0,}); + + /* overflow should show type but not value since it overflows */ + TEST_BTF_DUMP_DATA_OVER(btf, d, NULL, str, atomic_t, sizeof(atomic_t)-1, + "(atomic_t){\n", { .counter = 1}); +} + +static void test_btf_dump_enum_data(struct btf *btf, struct btf_dump *d, + char *str) +{ + /* enum where enum value does (and does not) exist */ + TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, + BPF_MAP_CREATE); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, + "(enum bpf_cmd)BPF_MAP_CREATE", 0); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, + BTF_F_COMPACT | BTF_F_NONAME, + "BPF_MAP_CREATE", + BPF_MAP_CREATE); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0, + "(enum bpf_cmd)BPF_MAP_CREATE", + BPF_MAP_CREATE); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "BPF_MAP_CREATE", 0); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, + BTF_F_COMPACT | BTF_F_ZERO, + "(enum bpf_cmd)BPF_MAP_CREATE", + BPF_MAP_CREATE); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "BPF_MAP_CREATE", BPF_MAP_CREATE); + TEST_BTF_DUMP_DATA_C(btf, d, "enum", str, enum bpf_cmd, BTF_F_COMPACT, 2000); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, + BTF_F_COMPACT | BTF_F_NONAME, + "2000", 2000); + TEST_BTF_DUMP_DATA(btf, d, "enum", str, enum bpf_cmd, 0, + "(enum bpf_cmd)2000", 2000); + + TEST_BTF_DUMP_DATA_OVER(btf, d, "enum", str, enum bpf_cmd, + sizeof(enum bpf_cmd) - 1, "", BPF_MAP_CREATE); +} + +static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d, + char *str) +{ + DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); + char zero_data[512] = { }; + char type_data[512]; + void *fops = type_data; + void *skb = type_data; + size_t type_sz; + __s32 type_id; + char *cmpstr; + int ret; + + memset(type_data, 255, sizeof(type_data)); + + /* simple struct */ + TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT, + {.name_off = (__u32)3,.val = (__s32)-1,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_COMPACT | BTF_F_NONAME, + "{3,-1,}", + { .name_off = 3, .val = -1,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0, +"(struct btf_enum){\n" +" .name_off = (__u32)3,\n" +" .val = (__s32)-1,\n" +"}", + { .name_off = 3, .val = -1,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_COMPACT | BTF_F_NONAME, + "{-1,}", + { .name_off = 0, .val = -1,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_COMPACT | BTF_F_NONAME | BTF_F_ZERO, + "{0,-1,}", + { .name_off = 0, .val = -1,}); + /* empty struct should be printed */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, BTF_F_COMPACT, + "(struct btf_enum){}", + { .name_off = 0, .val = 0,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_COMPACT | BTF_F_NONAME, + "{}", + { .name_off = 0, .val = 0,}); + 
TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, 0, +"(struct btf_enum){\n" +"}", + { .name_off = 0, .val = 0,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_COMPACT | BTF_F_ZERO, + "(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}", + { .name_off = 0, .val = 0,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct btf_enum, + BTF_F_ZERO, +"(struct btf_enum){\n" +" .name_off = (__u32)0,\n" +" .val = (__s32)0,\n" +"}", + { .name_off = 0, .val = 0,}); + + /* struct with pointers */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT, + "(struct list_head){.next = (struct list_head *)0x1,}", + { .next = (struct list_head *)1 }); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0, +"(struct list_head){\n" +" .next = (struct list_head *)0x1,\n" +"}", + { .next = (struct list_head *)1 }); + /* NULL pointer should not be displayed */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, BTF_F_COMPACT, + "(struct list_head){}", + { .next = (struct list_head *)0 }); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct list_head, 0, +"(struct list_head){\n" +"}", + { .next = (struct list_head *)0 }); + + /* struct with function pointers */ + type_id = btf__find_by_name(btf, "file_operations"); + if (ASSERT_GT(type_id, 0, "find type id")) { + type_sz = btf__resolve_size(btf, type_id); + str[0] = '\0'; + + ret = btf_dump__dump_type_data(d, type_id, fops, type_sz, &opts); + ASSERT_EQ(ret, type_sz, + "unexpected return value dumping file_operations"); + cmpstr = +"(struct file_operations){\n" +" .owner = (struct module *)0xffffffffffffffff,\n" +" .llseek = (loff_t (*)(struct file *, loff_t, int))0xffffffffffffffff,"; + + ASSERT_STRNEQ(str, cmpstr, strlen(cmpstr), "file_operations"); + } + + /* struct with char array */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, + "(struct bpf_prog_info){.name = (char[16])['f','o','o',],}", + { .name = "foo",}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, + BTF_F_COMPACT | BTF_F_NONAME, + "{['f','o','o',],}", + {.name = "foo",}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, 0, +"(struct bpf_prog_info){\n" +" .name = (char[16])[\n" +" 'f',\n" +" 'o',\n" +" 'o',\n" +" ],\n" +"}", + {.name = "foo",}); + /* leading null char means do not display string */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, + "(struct bpf_prog_info){}", + {.name = {'\0', 'f', 'o', 'o'}}); + /* handle non-printable characters */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_prog_info, BTF_F_COMPACT, + "(struct bpf_prog_info){.name = (char[16])[1,2,3,],}", + { .name = {1, 2, 3, 0}}); + + /* struct with non-char array */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT, + "(struct __sk_buff){.cb = (__u32[5])[1,2,3,4,5,],}", + { .cb = {1, 2, 3, 4, 5,},}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, + BTF_F_COMPACT | BTF_F_NONAME, + "{[1,2,3,4,5,],}", + { .cb = { 1, 2, 3, 4, 5},}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, 0, +"(struct __sk_buff){\n" +" .cb = (__u32[5])[\n" +" 1,\n" +" 2,\n" +" 3,\n" +" 4,\n" +" 5,\n" +" ],\n" +"}", + { .cb = { 1, 2, 3, 4, 5},}); + /* For non-char, arrays, show non-zero values only */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct __sk_buff, BTF_F_COMPACT, + "(struct __sk_buff){.cb = (__u32[5])[0,0,1,0,0,],}", + { .cb = { 0, 0, 1, 0, 0},}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct 
__sk_buff, 0, +"(struct __sk_buff){\n" +" .cb = (__u32[5])[\n" +" 0,\n" +" 0,\n" +" 1,\n" +" 0,\n" +" 0,\n" +" ],\n" +"}", + { .cb = { 0, 0, 1, 0, 0},}); + + /* struct with bitfields */ + TEST_BTF_DUMP_DATA_C(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT, + {.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, + BTF_F_COMPACT | BTF_F_NONAME, + "{1,0x2,0x3,4,5,}", + { .code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4, + .imm = 5,}); + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, 0, +"(struct bpf_insn){\n" +" .code = (__u8)1,\n" +" .dst_reg = (__u8)0x2,\n" +" .src_reg = (__u8)0x3,\n" +" .off = (__s16)4,\n" +" .imm = (__s32)5,\n" +"}", + {.code = 1, .dst_reg = 2, .src_reg = 3, .off = 4, .imm = 5}); + + /* zeroed bitfields should not be displayed */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_insn, BTF_F_COMPACT, + "(struct bpf_insn){.dst_reg = (__u8)0x1,}", + { .code = 0, .dst_reg = 1}); + + /* struct with enum bitfield */ + type_id = btf__find_by_name(btf, "fs_context"); + if (ASSERT_GT(type_id, 0, "find fs_context")) { + type_sz = btf__resolve_size(btf, type_id); + str[0] = '\0'; + + opts.emit_zeroes = true; + ret = btf_dump__dump_type_data(d, type_id, zero_data, type_sz, &opts); + ASSERT_EQ(ret, type_sz, + "unexpected return value dumping fs_context"); + + ASSERT_NEQ(strstr(str, "FS_CONTEXT_FOR_MOUNT"), NULL, + "bitfield value not present"); + } + + /* struct with nested anon union */ + TEST_BTF_DUMP_DATA(btf, d, "struct", str, struct bpf_sock_ops, BTF_F_COMPACT, + "(struct bpf_sock_ops){.op = (__u32)1,(union){.args = (__u32[4])[1,2,3,4,],.reply = (__u32)1,.replylong = (__u32[4])[1,2,3,4,],},}", + { .op = 1, .args = { 1, 2, 3, 4}}); + + /* union with nested struct */ + TEST_BTF_DUMP_DATA(btf, d, "union", str, union bpf_iter_link_info, BTF_F_COMPACT, + "(union bpf_iter_link_info){.map = (struct){.map_fd = (__u32)1,},.cgroup = (struct){.order = (enum bpf_cgroup_iter_order)BPF_CGROUP_ITER_SELF_ONLY,.cgroup_fd = (__u32)1,},.task = (struct){.tid = (__u32)1,.pid = (__u32)1,},}", + { .cgroup = { .order = 1, .cgroup_fd = 1, }}); + + /* struct skb with nested structs/unions; because type output is so + * complex, we don't do a string comparison, just verify we return + * the type size as the amount of data displayed. + */ + type_id = btf__find_by_name(btf, "sk_buff"); + if (ASSERT_GT(type_id, 0, "find struct sk_buff")) { + type_sz = btf__resolve_size(btf, type_id); + str[0] = '\0'; + + ret = btf_dump__dump_type_data(d, type_id, skb, type_sz, &opts); + ASSERT_EQ(ret, type_sz, + "unexpected return value dumping sk_buff"); + } + + /* overflow bpf_sock_ops struct with final element nonzero/zero. + * Regardless of the value of the final field, we don't have all the + * data we need to display it, so we should trigger an overflow. + * In other words overflow checking should trump "is field zero?" + * checks because if we've overflowed, it shouldn't matter what the + * field is - we can't trust its value so shouldn't display it. 
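 + * Hence both TEST_BTF_DUMP_DATA_OVER() calls below expect identical output, truncated right after the .op field.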
+ */ + TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, + sizeof(struct bpf_sock_ops) - 1, + "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", + { .op = 1, .skb_tcp_flags = 2}); + TEST_BTF_DUMP_DATA_OVER(btf, d, "struct", str, struct bpf_sock_ops, + sizeof(struct bpf_sock_ops) - 1, + "(struct bpf_sock_ops){\n\t.op = (__u32)1,\n", + { .op = 1, .skb_tcp_flags = 0}); +} + +static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d, + char *str) +{ +#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) + TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT, + "int cpu_number = (int)100", 100); +#endif + TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_profile_flip", int, BTF_F_COMPACT, + "static int cpu_profile_flip = (int)2", 2); +} + +static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str, + const char *name, const char *expected_val, + void *data, size_t data_sz) +{ + DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts); + int ret = 0, cmp; + size_t secsize; + __s32 type_id; + + opts.compact = true; + + type_id = btf__find_by_name(btf, name); + if (!ASSERT_GT(type_id, 0, "find type id")) + return; + + secsize = btf__resolve_size(btf, type_id); + ASSERT_EQ(secsize, 0, "verify section size"); + + str[0] = '\0'; + ret = btf_dump__dump_type_data(d, type_id, data, data_sz, &opts); + ASSERT_EQ(ret, 0, "unexpected return value"); + + cmp = strcmp(str, expected_val); + ASSERT_EQ(cmp, 0, "ensure expected/actual match"); +} + +static void test_btf_dump_datasec_data(char *str) +{ + struct btf *btf; + char license[4] = "GPL"; + struct btf_dump *d; + + btf = btf__parse("xdping_kern.bpf.o", NULL); + if (!ASSERT_OK_PTR(btf, "xdping_kern.bpf.o BTF not found")) + return; + + d = btf_dump__new(btf, btf_dump_snprintf, str, NULL); + if (!ASSERT_OK_PTR(d, "could not create BTF dump")) + goto out; + + test_btf_datasec(btf, d, str, "license", + "SEC(\"license\") char[4] _license = (char[4])['G','P','L',];", + license, sizeof(license)); +out: + btf_dump__free(d); + btf__free(btf); +} + +void test_btf_dump() { + char str[STRSIZE]; + struct btf_dump *d; + struct btf *btf; + int i; + + for (i = 0; i < ARRAY_SIZE(btf_dump_test_cases); i++) { + struct btf_dump_test_case *t = &btf_dump_test_cases[i]; + + if (!test__start_subtest(t->name)) + continue; + + test_btf_dump_case(i, &btf_dump_test_cases[i]); + } + if (test__start_subtest("btf_dump: incremental")) + test_btf_dump_incremental(); + + btf = libbpf_find_kernel_btf(); + if (!ASSERT_OK_PTR(btf, "no kernel BTF found")) + return; + + d = btf_dump__new(btf, btf_dump_snprintf, str, NULL); + if (!ASSERT_OK_PTR(d, "could not create BTF dump")) + return; + + /* Verify type display for various types. 
*/ + if (test__start_subtest("btf_dump: int_data")) + test_btf_dump_int_data(btf, d, str); + if (test__start_subtest("btf_dump: float_data")) + test_btf_dump_float_data(btf, d, str); + if (test__start_subtest("btf_dump: char_data")) + test_btf_dump_char_data(btf, d, str); + if (test__start_subtest("btf_dump: typedef_data")) + test_btf_dump_typedef_data(btf, d, str); + if (test__start_subtest("btf_dump: enum_data")) + test_btf_dump_enum_data(btf, d, str); + if (test__start_subtest("btf_dump: struct_data")) + test_btf_dump_struct_data(btf, d, str); + if (test__start_subtest("btf_dump: var_data")) + test_btf_dump_var_data(btf, d, str); + btf_dump__free(d); + btf__free(btf); + + if (test__start_subtest("btf_dump: datasec_data")) + test_btf_dump_datasec_data(str); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c new file mode 100644 index 000000000..5b9f84dbe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#define _GNU_SOURCE +#include <string.h> +#include <byteswap.h> +#include <test_progs.h> +#include <bpf/btf.h> + +void test_btf_endian() { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + enum btf_endianness endian = BTF_LITTLE_ENDIAN; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + enum btf_endianness endian = BTF_BIG_ENDIAN; +#else +#error "Unrecognized __BYTE_ORDER__" +#endif + enum btf_endianness swap_endian = 1 - endian; + struct btf *btf = NULL, *swap_btf = NULL; + const void *raw_data, *swap_raw_data; + const struct btf_type *t; + const struct btf_header *hdr; + __u32 raw_sz, swap_raw_sz; + int var_id; + + /* Load BTF in native endianness */ + btf = btf__parse_elf("btf_dump_test_case_syntax.bpf.o", NULL); + if (!ASSERT_OK_PTR(btf, "parse_native_btf")) + goto err_out; + + ASSERT_EQ(btf__endianness(btf), endian, "endian"); + btf__set_endianness(btf, swap_endian); + ASSERT_EQ(btf__endianness(btf), swap_endian, "endian"); + + /* Get raw BTF data in non-native endianness... 
*/ + raw_data = btf__raw_data(btf, &raw_sz); + if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) + goto err_out; + + /* ...and open it as a new BTF instance */ + swap_btf = btf__new(raw_data, raw_sz); + if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf")) + goto err_out; + + ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); + + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); + if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) + goto err_out; + + /* both raw data should be identical (with non-native endianness) */ + ASSERT_OK(memcmp(raw_data, swap_raw_data, raw_sz), "mem_identical"); + + /* make sure that at least BTF header data is really swapped */ + hdr = swap_raw_data; + ASSERT_EQ(bswap_16(hdr->magic), BTF_MAGIC, "btf_magic_swapped"); + ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes"); + + /* swap it back to native endianness */ + btf__set_endianness(swap_btf, endian); + swap_raw_data = btf__raw_data(swap_btf, &swap_raw_sz); + if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data")) + goto err_out; + + /* now header should have native BTF_MAGIC */ + hdr = swap_raw_data; + ASSERT_EQ(hdr->magic, BTF_MAGIC, "btf_magic_native"); + ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes"); + + /* now modify original BTF */ + var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1); + ASSERT_GT(var_id, 0, "var_id"); + + btf__free(swap_btf); + swap_btf = NULL; + + btf__set_endianness(btf, swap_endian); + raw_data = btf__raw_data(btf, &raw_sz); + if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted")) + goto err_out; + + /* and re-open swapped raw data again */ + swap_btf = btf__new(raw_data, raw_sz); + if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf")) + goto err_out; + + ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian"); + ASSERT_EQ(btf__type_cnt(swap_btf), btf__type_cnt(btf), "nr_types"); + + /* the type should appear as if it was stored in native endianness */ + t = btf__type_by_id(swap_btf, var_id); + ASSERT_STREQ(btf__str_by_offset(swap_btf, t->name_off), "some_var", "var_name"); + ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_linkage"); + ASSERT_EQ(t->type, 1, "var_type"); + +err_out: + btf__free(btf); + btf__free(swap_btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c new file mode 100644 index 000000000..eb90a6b88 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> + +#include "test_btf_map_in_map.skel.h" + +static int duration; + +static __u32 bpf_map_id(struct bpf_map *map) +{ + struct bpf_map_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len); + if (err) + return 0; + return info.id; +} + +static void test_lookup_update(void) +{ + int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id; + int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd; + struct test_btf_map_in_map *skel; + int err, key = 0, val, i, fd; + + skel = test_btf_map_in_map__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) + return; + + err = test_btf_map_in_map__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + map1_fd = bpf_map__fd(skel->maps.inner_map1); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + 
map3_fd = bpf_map__fd(skel->maps.inner_map3); + map4_fd = bpf_map__fd(skel->maps.inner_map4); + map5_fd = bpf_map__fd(skel->maps.inner_map5); + outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn); + outer_arr_fd = bpf_map__fd(skel->maps.outer_arr); + outer_hash_fd = bpf_map__fd(skel->maps.outer_hash); + + /* inner1 = input, inner2 = input + 1, inner3 = input + 2 */ + bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0); + bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0); + bpf_map_update_elem(outer_arr_dyn_fd, &key, &map3_fd, 0); + skel->bss->input = 1; + usleep(1); + bpf_map_lookup_elem(map1_fd, &key, &val); + CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1); + bpf_map_lookup_elem(map2_fd, &key, &val); + CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2); + bpf_map_lookup_elem(map3_fd, &key, &val); + CHECK(val != 3, "inner3", "got %d != exp %d\n", val, 3); + + /* inner2 = input, inner1 = input + 1, inner4 = input + 2 */ + bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0); + bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0); + bpf_map_update_elem(outer_arr_dyn_fd, &key, &map4_fd, 0); + skel->bss->input = 3; + usleep(1); + bpf_map_lookup_elem(map1_fd, &key, &val); + CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4); + bpf_map_lookup_elem(map2_fd, &key, &val); + CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3); + bpf_map_lookup_elem(map4_fd, &key, &val); + CHECK(val != 5, "inner4", "got %d != exp %d\n", val, 5); + + /* inner5 = input + 2 */ + bpf_map_update_elem(outer_arr_dyn_fd, &key, &map5_fd, 0); + skel->bss->input = 5; + usleep(1); + bpf_map_lookup_elem(map5_fd, &key, &val); + CHECK(val != 7, "inner5", "got %d != exp %d\n", val, 7); + + for (i = 0; i < 5; i++) { + val = i % 2 ? map1_fd : map2_fd; + err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0); + if (CHECK_FAIL(err)) { + printf("failed to update hash_of_maps on iter #%d\n", i); + goto cleanup; + } + err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0); + if (CHECK_FAIL(err)) { + printf("failed to update array_of_maps on iter #%d\n", i); + goto cleanup; + } + val = i % 2 ? map4_fd : map5_fd; + err = bpf_map_update_elem(outer_arr_dyn_fd, &key, &val, 0); + if (CHECK_FAIL(err)) { + printf("failed to update array_of_maps (dyn) on iter #%d\n", i); + goto cleanup; + } + } + + map1_id = bpf_map_id(skel->maps.inner_map1); + map2_id = bpf_map_id(skel->maps.inner_map2); + CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n"); + CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n"); + + test_btf_map_in_map__destroy(skel); + skel = NULL; + + /* we need to either wait for or force synchronize_rcu(), before + * checking for "still exists" condition, otherwise map could still be + * resolvable by ID, causing false positives. + * + * Older kernels (5.8 and earlier) freed map only after two + * synchronize_rcu()s, so trigger two, to be entirely sure. 
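 + * Once the maps are really gone, the bpf_map_get_fd_by_id() calls below must fail for both saved IDs.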
+ */ + CHECK(kern_sync_rcu(), "sync_rcu", "failed\n"); + CHECK(kern_sync_rcu(), "sync_rcu", "failed\n"); + + fd = bpf_map_get_fd_by_id(map1_id); + if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) { + close(fd); + goto cleanup; + } + fd = bpf_map_get_fd_by_id(map2_id); + if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) { + close(fd); + goto cleanup; + } + +cleanup: + test_btf_map_in_map__destroy(skel); +} + +static void test_diff_size(void) +{ + struct test_btf_map_in_map *skel; + int err, inner_map_fd, zero = 0; + + skel = test_btf_map_in_map__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) + return; + + inner_map_fd = bpf_map__fd(skel->maps.sockarr_sz2); + err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_sockarr), &zero, + &inner_map_fd, 0); + CHECK(err, "outer_sockarr inner map size check", + "cannot use a different size inner_map\n"); + + inner_map_fd = bpf_map__fd(skel->maps.inner_map_sz2); + err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &zero, + &inner_map_fd, 0); + CHECK(!err, "outer_arr inner map size check", + "incorrectly updated with a different size inner_map\n"); + + test_btf_map_in_map__destroy(skel); +} + +void test_btf_map_in_map(void) +{ + if (test__start_subtest("lookup_update")) + test_lookup_update(); + + if (test__start_subtest("diff_size")) + test_diff_size(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_module.c b/tools/testing/selftests/bpf/prog_tests/btf_module.c new file mode 100644 index 000000000..2239d1fe0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_module.c @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include <bpf/btf.h> + +static const char *module_name = "bpf_testmod"; +static const char *symbol_name = "bpf_testmod_test_read"; + +void test_btf_module() +{ + struct btf *vmlinux_btf, *module_btf; + __s32 type_id; + + if (!env.has_testmod) { + test__skip(); + return; + } + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) + return; + + module_btf = btf__load_module_btf(module_name, vmlinux_btf); + if (!ASSERT_OK_PTR(module_btf, "could not load module BTF")) + goto cleanup; + + type_id = btf__find_by_name(module_btf, symbol_name); + ASSERT_GT(type_id, 0, "func not found"); + +cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c new file mode 100644 index 000000000..7a277035c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#define _GNU_SOURCE +#include <netinet/in.h> +#include <arpa/inet.h> +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <sched.h> +#include <linux/compiler.h> +#include <bpf/libbpf.h> + +#include "network_helpers.h" +#include "test_progs.h" +#include "test_btf_skc_cls_ingress.skel.h" + +static struct test_btf_skc_cls_ingress *skel; +static struct sockaddr_in6 srv_sa6; +static __u32 duration; + +#define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress" + +static int prepare_netns(void) +{ + if (CHECK(unshare(CLONE_NEWNET), "create netns", + "unshare(CLONE_NEWNET): %s (%d)", + strerror(errno), errno)) + return -1; + + if (CHECK(system("ip link set dev lo up"), + "ip link set dev lo up", "failed\n")) + 
return -1; + + if (CHECK(system("tc qdisc add dev lo clsact"), + "tc qdisc add dev lo clsact", "failed\n")) + return -1; + + if (CHECK(system("tc filter add dev lo ingress bpf direct-action object-pinned " PROG_PIN_FILE), + "install tc cls-prog at ingress", "failed\n")) + return -1; + + /* Ensure 20 bytes options (i.e. in total 40 bytes tcp header) for the + * bpf_tcp_gen_syncookie() helper. + */ + if (write_sysctl("/proc/sys/net/ipv4/tcp_window_scaling", "1") || + write_sysctl("/proc/sys/net/ipv4/tcp_timestamps", "1") || + write_sysctl("/proc/sys/net/ipv4/tcp_sack", "1")) + return -1; + + return 0; +} + +static void reset_test(void) +{ + memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6)); + skel->bss->listen_tp_sport = 0; + skel->bss->req_sk_sport = 0; + skel->bss->recv_cookie = 0; + skel->bss->gen_cookie = 0; + skel->bss->linum = 0; +} + +static void print_err_line(void) +{ + if (skel->bss->linum) + printf("bpf prog error at line %u\n", skel->bss->linum); +} + +static void test_conn(void) +{ + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; + socklen_t addrlen = sizeof(srv_sa6); + int srv_port; + + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1")) + return; + + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (CHECK_FAIL(listen_fd == -1)) + return; + + err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen); + if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err, + errno)) + goto done; + memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6)); + srv_port = ntohs(srv_sa6.sin6_port); + + cli_fd = connect_to_fd(listen_fd, 0); + if (CHECK_FAIL(cli_fd == -1)) + goto done; + + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + + if (CHECK(skel->bss->listen_tp_sport != srv_port || + skel->bss->req_sk_sport != srv_port, + "Unexpected sk src port", + "listen_tp_sport:%u req_sk_sport:%u expected:%u\n", + skel->bss->listen_tp_sport, skel->bss->req_sk_sport, + srv_port)) + goto done; + + if (CHECK(skel->bss->gen_cookie || skel->bss->recv_cookie, + "Unexpected syncookie states", + "gen_cookie:%u recv_cookie:%u\n", + skel->bss->gen_cookie, skel->bss->recv_cookie)) + goto done; + + CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n", + skel->bss->linum); + +done: + if (listen_fd != -1) + close(listen_fd); + if (cli_fd != -1) + close(cli_fd); + if (srv_fd != -1) + close(srv_fd); +} + +static void test_syncookie(void) +{ + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; + socklen_t addrlen = sizeof(srv_sa6); + int srv_port; + + /* Enforce syncookie mode */ + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2")) + return; + + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (CHECK_FAIL(listen_fd == -1)) + return; + + err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen); + if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err, + errno)) + goto done; + memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6)); + srv_port = ntohs(srv_sa6.sin6_port); + + cli_fd = connect_to_fd(listen_fd, 0); + if (CHECK_FAIL(cli_fd == -1)) + goto done; + + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + + if (CHECK(skel->bss->listen_tp_sport != srv_port, + "Unexpected tp src port", + "listen_tp_sport:%u expected:%u\n", + skel->bss->listen_tp_sport, srv_port)) + goto done; + + if (CHECK(skel->bss->req_sk_sport, + "Unexpected req_sk src port", + "req_sk_sport:%u expected:0\n", + skel->bss->req_sk_sport)) + goto done; + + if 
(CHECK(!skel->bss->gen_cookie || + skel->bss->gen_cookie != skel->bss->recv_cookie, + "Unexpected syncookie states", + "gen_cookie:%u recv_cookie:%u\n", + skel->bss->gen_cookie, skel->bss->recv_cookie)) + goto done; + + CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n", + skel->bss->linum); + +done: + if (listen_fd != -1) + close(listen_fd); + if (cli_fd != -1) + close(cli_fd); + if (srv_fd != -1) + close(srv_fd); +} + +struct test { + const char *desc; + void (*run)(void); +}; + +#define DEF_TEST(name) { #name, test_##name } +static struct test tests[] = { + DEF_TEST(conn), + DEF_TEST(syncookie), +}; + +void test_btf_skc_cls_ingress(void) +{ + int i, err; + + skel = test_btf_skc_cls_ingress__open_and_load(); + if (CHECK(!skel, "test_btf_skc_cls_ingress__open_and_load", "failed\n")) + return; + + err = bpf_program__pin(skel->progs.cls_ingress, PROG_PIN_FILE); + if (CHECK(err, "bpf_program__pin", + "cannot pin bpf prog to %s. err:%d\n", PROG_PIN_FILE, err)) { + test_btf_skc_cls_ingress__destroy(skel); + return; + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!test__start_subtest(tests[i].desc)) + continue; + + if (prepare_netns()) + break; + + tests[i].run(); + + print_err_line(); + reset_test(); + } + + bpf_program__unpin(skel->progs.cls_ingress, PROG_PIN_FILE); + test_btf_skc_cls_ingress__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c new file mode 100644 index 000000000..eef115867 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include <bpf/btf.h> + +static char *dump_buf; +static size_t dump_buf_sz; +static FILE *dump_buf_file; + +static void btf_dump_printf(void *ctx, const char *fmt, va_list args) +{ + vfprintf(ctx, fmt, args); +} + +void test_btf_split() { + struct btf_dump *d = NULL; + const struct btf_type *t; + struct btf *btf1, *btf2; + int str_off, i, err; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__set_pointer_size(btf1, 8); /* enforce 64-bit arch */ + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 1); /* [2] ptr to int */ + + btf__add_struct(btf1, "s1", 4); /* [3] struct s1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + /* pointer size should be "inherited" from main BTF */ + ASSERT_EQ(btf__pointer_size(btf2), 8, "inherit_ptr_sz"); + + str_off = btf__find_str(btf2, "int"); + ASSERT_NEQ(str_off, -ENOENT, "str_int_missing"); + + t = btf__type_by_id(btf2, 1); + if (!ASSERT_OK_PTR(t, "int_type")) + goto cleanup; + ASSERT_EQ(btf_is_int(t), true, "int_kind"); + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "int", "int_name"); + + btf__add_struct(btf2, "s2", 16); /* [4] struct s2 { */ + btf__add_field(btf2, "f1", 3, 0, 0); /* struct s1 f1; */ + btf__add_field(btf2, "f2", 1, 32, 0); /* int f2; */ + btf__add_field(btf2, "f3", 2, 64, 0); /* int *f3; */ + /* } */ + + t = btf__type_by_id(btf1, 4); + ASSERT_NULL(t, "split_type_in_main"); + + t = btf__type_by_id(btf2, 4); + if (!ASSERT_OK_PTR(t, "split_struct_type")) + goto cleanup; + ASSERT_EQ(btf_is_struct(t), true, "split_struct_kind"); + ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen"); + ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name"); + + /* 
BTF-to-C dump of split BTF */ + dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); + if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream")) + return; + d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL); + if (!ASSERT_OK_PTR(d, "btf_dump__new")) + goto cleanup; + for (i = 1; i < btf__type_cnt(btf2); i++) { + err = btf_dump__dump_type(d, i); + ASSERT_OK(err, "dump_type_ok"); + } + fflush(dump_buf_file); + dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ + ASSERT_STREQ(dump_buf, +"struct s1 {\n" +" int f1;\n" +"};\n" +"\n" +"struct s2 {\n" +" struct s1 f1;\n" +" int f2;\n" +" int *f3;\n" +"};\n\n", "c_dump"); + +cleanup: + if (dump_buf_file) + fclose(dump_buf_file); + free(dump_buf); + btf_dump__free(d); + btf__free(btf1); + btf__free(btf2); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c new file mode 100644 index 000000000..071430cd5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <bpf/btf.h> +#include "test_btf_decl_tag.skel.h" + +/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */ +struct btf_type_tag_test { + int **p; +}; +#include "btf_type_tag.skel.h" +#include "btf_type_tag_user.skel.h" +#include "btf_type_tag_percpu.skel.h" + +static void test_btf_decl_tag(void) +{ + struct test_btf_decl_tag *skel; + + skel = test_btf_decl_tag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "btf_decl_tag")) + return; + + if (skel->rodata->skip_tests) { + printf("%s:SKIP: btf_decl_tag attribute not supported", __func__); + test__skip(); + } + + test_btf_decl_tag__destroy(skel); +} + +static void test_btf_type_tag(void) +{ + struct btf_type_tag *skel; + + skel = btf_type_tag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag")) + return; + + if (skel->rodata->skip_tests) { + printf("%s:SKIP: btf_type_tag attribute not supported", __func__); + test__skip(); + } + + btf_type_tag__destroy(skel); +} + +/* loads vmlinux_btf as well as module_btf. If the caller passes NULL as + * module_btf, it will not load module btf. + * + * Returns 0 on success. + * Return -1 On error. In case of error, the loaded btf will be freed and the + * input parameters will be set to pointing to NULL. 
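 + * On success the caller owns the returned BTF objects and must btf__free() them (see the cleanup paths in the callers below).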
+ */ +static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf, + bool needs_vmlinux_tag) +{ + const char *module_name = "bpf_testmod"; + __s32 type_id; + + if (!env.has_testmod) { + test__skip(); + return -1; + } + + *vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF")) + return -1; + + if (!needs_vmlinux_tag) + goto load_module_btf; + + /* skip the test if the vmlinux does not have __user tags */ + type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG); + if (type_id <= 0) { + printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__); + test__skip(); + goto free_vmlinux_btf; + } + +load_module_btf: + /* skip loading module_btf, if not requested by caller */ + if (!module_btf) + return 0; + + *module_btf = btf__load_module_btf(module_name, *vmlinux_btf); + if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF")) + goto free_vmlinux_btf; + + /* skip the test if the module does not have __user tags */ + type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG); + if (type_id <= 0) { + printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name); + test__skip(); + goto free_module_btf; + } + + return 0; + +free_module_btf: + btf__free(*module_btf); +free_vmlinux_btf: + btf__free(*vmlinux_btf); + + *vmlinux_btf = NULL; + if (module_btf) + *module_btf = NULL; + return -1; +} + +static void test_btf_type_tag_mod_user(bool load_test_user1) +{ + struct btf *vmlinux_btf = NULL, *module_btf = NULL; + struct btf_type_tag_user *skel; + int err; + + if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + + skel = btf_type_tag_user__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_sys_getsockname, false); + if (load_test_user1) + bpf_program__set_autoload(skel->progs.test_user2, false); + else + bpf_program__set_autoload(skel->progs.test_user1, false); + + err = btf_type_tag_user__load(skel); + ASSERT_ERR(err, "btf_type_tag_user"); + + btf_type_tag_user__destroy(skel); + +cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_user(void) +{ + struct btf_type_tag_user *skel; + struct btf *vmlinux_btf = NULL; + int err; + + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) + return; + + skel = btf_type_tag_user__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_user2, false); + bpf_program__set_autoload(skel->progs.test_user1, false); + + err = btf_type_tag_user__load(skel); + ASSERT_ERR(err, "btf_type_tag_user"); + + btf_type_tag_user__destroy(skel); + +cleanup: + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_mod_percpu(bool load_test_percpu1) +{ + struct btf *vmlinux_btf, *module_btf; + struct btf_type_tag_percpu *skel; + int err; + + if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + if (load_test_percpu1) + bpf_program__set_autoload(skel->progs.test_percpu2, false); + else + bpf_program__set_autoload(skel->progs.test_percpu1, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu"); + + btf_type_tag_percpu__destroy(skel); + 
+cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_percpu(bool load_test) +{ + struct btf_type_tag_percpu *skel; + struct btf *vmlinux_btf = NULL; + int err; + + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu2, false); + bpf_program__set_autoload(skel->progs.test_percpu1, false); + if (load_test) { + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu_load"); + } else { + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_OK(err, "btf_type_tag_percpu_helper"); + } + + btf_type_tag_percpu__destroy(skel); + +cleanup: + btf__free(vmlinux_btf); +} + +void test_btf_tag(void) +{ + if (test__start_subtest("btf_decl_tag")) + test_btf_decl_tag(); + if (test__start_subtest("btf_type_tag")) + test_btf_type_tag(); + + if (test__start_subtest("btf_type_tag_user_mod1")) + test_btf_type_tag_mod_user(true); + if (test__start_subtest("btf_type_tag_user_mod2")) + test_btf_type_tag_mod_user(false); + if (test__start_subtest("btf_type_tag_sys_user_vmlinux")) + test_btf_type_tag_vmlinux_user(); + + if (test__start_subtest("btf_type_tag_percpu_mod1")) + test_btf_type_tag_mod_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_mod2")) + test_btf_type_tag_mod_percpu(false); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_load")) + test_btf_type_tag_vmlinux_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper")) + test_btf_type_tag_vmlinux_percpu(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c new file mode 100644 index 000000000..6e36de130 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include <bpf/btf.h> +#include "btf_helpers.h" + +static void gen_btf(struct btf *btf) +{ + const struct btf_var_secinfo *vi; + const struct btf_type *t; + const struct btf_member *m; + const struct btf_enum64 *v64; + const struct btf_enum *v; + const struct btf_param *p; + int id, err, str_off; + + str_off = btf__find_str(btf, "int"); + ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off"); + + str_off = btf__add_str(btf, "int"); + ASSERT_EQ(str_off, 1, "int_str_off"); + + str_off = btf__find_str(btf, "int"); + ASSERT_EQ(str_off, 1, "int_str_found_off"); + + /* BTF_KIND_INT */ + id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); + ASSERT_EQ(id, 1, "int_id"); + + t = btf__type_by_id(btf, 1); + /* should re-use previously added "int" string */ + ASSERT_EQ(t->name_off, str_off, "int_name_off"); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "int", "int_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_INT, "int_kind"); + ASSERT_EQ(t->size, 4, "int_sz"); + ASSERT_EQ(btf_int_encoding(t), BTF_INT_SIGNED, "int_enc"); + ASSERT_EQ(btf_int_bits(t), 32, "int_bits"); + ASSERT_STREQ(btf_type_raw_dump(btf, 1), + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", "raw_dump"); + + /* invalid int size */ + id = btf__add_int(btf, "bad sz int", 7, 0); + ASSERT_ERR(id, "int_bad_sz"); + /* invalid encoding */ + id = btf__add_int(btf, "bad enc int", 4, 123); + ASSERT_ERR(id, "int_bad_enc"); + /* 
NULL name */ + id = btf__add_int(btf, NULL, 4, 0); + ASSERT_ERR(id, "int_bad_null_name"); + /* empty name */ + id = btf__add_int(btf, "", 4, 0); + ASSERT_ERR(id, "int_bad_empty_name"); + + /* PTR/CONST/VOLATILE/RESTRICT */ + id = btf__add_ptr(btf, 1); + ASSERT_EQ(id, 2, "ptr_id"); + t = btf__type_by_id(btf, 2); + ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_kind"); + ASSERT_EQ(t->type, 1, "ptr_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 2), + "[2] PTR '(anon)' type_id=1", "raw_dump"); + + id = btf__add_const(btf, 5); /* points forward to restrict */ + ASSERT_EQ(id, 3, "const_id"); + t = btf__type_by_id(btf, 3); + ASSERT_EQ(btf_kind(t), BTF_KIND_CONST, "const_kind"); + ASSERT_EQ(t->type, 5, "const_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 3), + "[3] CONST '(anon)' type_id=5", "raw_dump"); + + id = btf__add_volatile(btf, 3); + ASSERT_EQ(id, 4, "volatile_id"); + t = btf__type_by_id(btf, 4); + ASSERT_EQ(btf_kind(t), BTF_KIND_VOLATILE, "volatile_kind"); + ASSERT_EQ(t->type, 3, "volatile_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 4), + "[4] VOLATILE '(anon)' type_id=3", "raw_dump"); + + id = btf__add_restrict(btf, 4); + ASSERT_EQ(id, 5, "restrict_id"); + t = btf__type_by_id(btf, 5); + ASSERT_EQ(btf_kind(t), BTF_KIND_RESTRICT, "restrict_kind"); + ASSERT_EQ(t->type, 4, "restrict_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 5), + "[5] RESTRICT '(anon)' type_id=4", "raw_dump"); + + /* ARRAY */ + id = btf__add_array(btf, 1, 2, 10); /* int *[10] */ + ASSERT_EQ(id, 6, "array_id"); + t = btf__type_by_id(btf, 6); + ASSERT_EQ(btf_kind(t), BTF_KIND_ARRAY, "array_kind"); + ASSERT_EQ(btf_array(t)->index_type, 1, "array_index_type"); + ASSERT_EQ(btf_array(t)->type, 2, "array_elem_type"); + ASSERT_EQ(btf_array(t)->nelems, 10, "array_nelems"); + ASSERT_STREQ(btf_type_raw_dump(btf, 6), + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", "raw_dump"); + + /* STRUCT */ + err = btf__add_field(btf, "field", 1, 0, 0); + ASSERT_ERR(err, "no_struct_field"); + id = btf__add_struct(btf, "s1", 8); + ASSERT_EQ(id, 7, "struct_id"); + err = btf__add_field(btf, "f1", 1, 0, 0); + ASSERT_OK(err, "f1_res"); + err = btf__add_field(btf, "f2", 1, 32, 16); + ASSERT_OK(err, "f2_res"); + + t = btf__type_by_id(btf, 7); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "s1", "struct_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_STRUCT, "struct_kind"); + ASSERT_EQ(btf_vlen(t), 2, "struct_vlen"); + ASSERT_EQ(btf_kflag(t), true, "struct_kflag"); + ASSERT_EQ(t->size, 8, "struct_sz"); + m = btf_members(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name"); + ASSERT_EQ(m->type, 1, "f1_type"); + ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off"); + ASSERT_EQ(btf_member_bitfield_size(t, 0), 0, "f1_bit_sz"); + m = btf_members(t) + 1; + ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f2", "f2_name"); + ASSERT_EQ(m->type, 1, "f2_type"); + ASSERT_EQ(btf_member_bit_offset(t, 1), 32, "f2_bit_off"); + ASSERT_EQ(btf_member_bitfield_size(t, 1), 16, "f2_bit_sz"); + ASSERT_STREQ(btf_type_raw_dump(btf, 7), + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", "raw_dump"); + + /* UNION */ + id = btf__add_union(btf, "u1", 8); + ASSERT_EQ(id, 8, "union_id"); + + /* invalid, non-zero offset */ + err = btf__add_field(btf, "field", 1, 1, 0); + ASSERT_ERR(err, "no_struct_field"); + + err = btf__add_field(btf, "f1", 1, 0, 16); + ASSERT_OK(err, "f1_res"); + + t = btf__type_by_id(btf, 8); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "u1", 
"union_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_UNION, "union_kind"); + ASSERT_EQ(btf_vlen(t), 1, "union_vlen"); + ASSERT_EQ(btf_kflag(t), true, "union_kflag"); + ASSERT_EQ(t->size, 8, "union_sz"); + m = btf_members(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name"); + ASSERT_EQ(m->type, 1, "f1_type"); + ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off"); + ASSERT_EQ(btf_member_bitfield_size(t, 0), 16, "f1_bit_sz"); + ASSERT_STREQ(btf_type_raw_dump(btf, 8), + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", "raw_dump"); + + /* ENUM */ + id = btf__add_enum(btf, "e1", 4); + ASSERT_EQ(id, 9, "enum_id"); + err = btf__add_enum_value(btf, "v1", 1); + ASSERT_OK(err, "v1_res"); + err = btf__add_enum_value(btf, "v2", 2); + ASSERT_OK(err, "v2_res"); + + t = btf__type_by_id(btf, 9); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_kind"); + ASSERT_EQ(btf_vlen(t), 2, "enum_vlen"); + ASSERT_EQ(t->size, 4, "enum_sz"); + v = btf_enum(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v1", "v1_name"); + ASSERT_EQ(v->val, 1, "v1_val"); + v = btf_enum(t) + 1; + ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name"); + ASSERT_EQ(v->val, 2, "v2_val"); + ASSERT_STREQ(btf_type_raw_dump(btf, 9), + "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", "raw_dump"); + + /* FWDs */ + id = btf__add_fwd(btf, "struct_fwd", BTF_FWD_STRUCT); + ASSERT_EQ(id, 10, "struct_fwd_id"); + t = btf__type_by_id(btf, 10); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "struct_fwd", "fwd_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind"); + ASSERT_EQ(btf_kflag(t), 0, "fwd_kflag"); + ASSERT_STREQ(btf_type_raw_dump(btf, 10), + "[10] FWD 'struct_fwd' fwd_kind=struct", "raw_dump"); + + id = btf__add_fwd(btf, "union_fwd", BTF_FWD_UNION); + ASSERT_EQ(id, 11, "union_fwd_id"); + t = btf__type_by_id(btf, 11); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "union_fwd", "fwd_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind"); + ASSERT_EQ(btf_kflag(t), 1, "fwd_kflag"); + ASSERT_STREQ(btf_type_raw_dump(btf, 11), + "[11] FWD 'union_fwd' fwd_kind=union", "raw_dump"); + + id = btf__add_fwd(btf, "enum_fwd", BTF_FWD_ENUM); + ASSERT_EQ(id, 12, "enum_fwd_id"); + t = btf__type_by_id(btf, 12); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "enum_fwd", "fwd_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_fwd_kind"); + ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_kind"); + ASSERT_EQ(t->size, 4, "enum_fwd_sz"); + ASSERT_STREQ(btf_type_raw_dump(btf, 12), + "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", "raw_dump"); + + /* TYPEDEF */ + id = btf__add_typedef(btf, "typedef1", 1); + ASSERT_EQ(id, 13, "typedef_fwd_id"); + t = btf__type_by_id(btf, 13); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "typedef1", "typedef_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_TYPEDEF, "typedef_kind"); + ASSERT_EQ(t->type, 1, "typedef_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 13), + "[13] TYPEDEF 'typedef1' type_id=1", "raw_dump"); + + /* FUNC & FUNC_PROTO */ + id = btf__add_func(btf, "func1", BTF_FUNC_GLOBAL, 15); + ASSERT_EQ(id, 14, "func_id"); + t = btf__type_by_id(btf, 14); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "func1", "func_name"); + ASSERT_EQ(t->type, 15, "func_type"); + ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC, "func_kind"); + ASSERT_EQ(btf_vlen(t), BTF_FUNC_GLOBAL, "func_vlen"); + ASSERT_STREQ(btf_type_raw_dump(btf, 
14), + "[14] FUNC 'func1' type_id=15 linkage=global", "raw_dump"); + + id = btf__add_func_proto(btf, 1); + ASSERT_EQ(id, 15, "func_proto_id"); + err = btf__add_func_param(btf, "p1", 1); + ASSERT_OK(err, "p1_res"); + err = btf__add_func_param(btf, "p2", 2); + ASSERT_OK(err, "p2_res"); + + t = btf__type_by_id(btf, 15); + ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC_PROTO, "func_proto_kind"); + ASSERT_EQ(btf_vlen(t), 2, "func_proto_vlen"); + ASSERT_EQ(t->type, 1, "func_proto_ret_type"); + p = btf_params(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p1", "p1_name"); + ASSERT_EQ(p->type, 1, "p1_type"); + p = btf_params(t) + 1; + ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p2", "p2_name"); + ASSERT_EQ(p->type, 2, "p2_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 15), + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", "raw_dump"); + + /* VAR */ + id = btf__add_var(btf, "var1", BTF_VAR_GLOBAL_ALLOCATED, 1); + ASSERT_EQ(id, 16, "var_id"); + t = btf__type_by_id(btf, 16); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "var1", "var_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_VAR, "var_kind"); + ASSERT_EQ(t->type, 1, "var_type"); + ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 16), + "[16] VAR 'var1' type_id=1, linkage=global-alloc", "raw_dump"); + + /* DATASECT */ + id = btf__add_datasec(btf, "datasec1", 12); + ASSERT_EQ(id, 17, "datasec_id"); + err = btf__add_datasec_var_info(btf, 1, 4, 8); + ASSERT_OK(err, "v1_res"); + + t = btf__type_by_id(btf, 17); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "datasec1", "datasec_name"); + ASSERT_EQ(t->size, 12, "datasec_sz"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DATASEC, "datasec_kind"); + ASSERT_EQ(btf_vlen(t), 1, "datasec_vlen"); + vi = btf_var_secinfos(t) + 0; + ASSERT_EQ(vi->type, 1, "v1_type"); + ASSERT_EQ(vi->offset, 4, "v1_off"); + ASSERT_EQ(vi->size, 8, "v1_sz"); + ASSERT_STREQ(btf_type_raw_dump(btf, 17), + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", "raw_dump"); + + /* DECL_TAG */ + id = btf__add_decl_tag(btf, "tag1", 16, -1); + ASSERT_EQ(id, 18, "tag_id"); + t = btf__type_by_id(btf, 18); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); + ASSERT_EQ(t->type, 16, "tag_type"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, -1, "tag_component_idx"); + ASSERT_STREQ(btf_type_raw_dump(btf, 18), + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", "raw_dump"); + + id = btf__add_decl_tag(btf, "tag2", 14, 1); + ASSERT_EQ(id, 19, "tag_id"); + t = btf__type_by_id(btf, 19); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_DECL_TAG, "tag_kind"); + ASSERT_EQ(t->type, 14, "tag_type"); + ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx"); + ASSERT_STREQ(btf_type_raw_dump(btf, 19), + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); + + /* TYPE_TAG */ + id = btf__add_type_tag(btf, "tag1", 1); + ASSERT_EQ(id, 20, "tag_id"); + t = btf__type_by_id(btf, 20); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); + ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind"); + ASSERT_EQ(t->type, 1, "tag_type"); + ASSERT_STREQ(btf_type_raw_dump(btf, 20), + "[20] TYPE_TAG 'tag1' type_id=1", "raw_dump"); + + /* ENUM64 */ + id = btf__add_enum64(btf, "e1", 8, true); + ASSERT_EQ(id, 21, "enum64_id"); + err = btf__add_enum64_value(btf, "v1", 
-1); + ASSERT_OK(err, "v1_res"); + err = btf__add_enum64_value(btf, "v2", 0x123456789); /* 4886718345 */ + ASSERT_OK(err, "v2_res"); + t = btf__type_by_id(btf, 21); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind"); + ASSERT_EQ(btf_vlen(t), 2, "enum64_vlen"); + ASSERT_EQ(t->size, 8, "enum64_sz"); + v64 = btf_enum64(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name"); + ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val"); + ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val"); + v64 = btf_enum64(t) + 1; + ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v2", "v2_name"); + ASSERT_EQ(v64->val_hi32, 0x1, "v2_val"); + ASSERT_EQ(v64->val_lo32, 0x23456789, "v2_val"); + ASSERT_STREQ(btf_type_raw_dump(btf, 21), + "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n" + "\t'v1' val=-1\n" + "\t'v2' val=4886718345", "raw_dump"); + + id = btf__add_enum64(btf, "e1", 8, false); + ASSERT_EQ(id, 22, "enum64_id"); + err = btf__add_enum64_value(btf, "v1", 0xffffffffFFFFFFFF); /* 18446744073709551615 */ + ASSERT_OK(err, "v1_res"); + t = btf__type_by_id(btf, 22); + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name"); + ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind"); + ASSERT_EQ(btf_vlen(t), 1, "enum64_vlen"); + ASSERT_EQ(t->size, 8, "enum64_sz"); + v64 = btf_enum64(t) + 0; + ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name"); + ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val"); + ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val"); + ASSERT_STREQ(btf_type_raw_dump(btf, 22), + "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n" + "\t'v1' val=18446744073709551615", "raw_dump"); +} + +static void test_btf_add() +{ + struct btf *btf; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "new_empty")) + return; + + gen_btf(btf); + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", + "[20] TYPE_TAG 'tag1' type_id=1", + "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n" + "\t'v1' val=-1\n" + "\t'v2' val=4886718345", + "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n" + "\t'v1' val=18446744073709551615"); + + btf__free(btf); +} + +static void test_btf_add_btf() +{ + struct btf *btf1 = NULL, *btf2 = NULL; + int id; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "btf1")) + return; + + btf2 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf2, "btf2")) + goto cleanup; + + gen_btf(btf1); 
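+	/* Illustrative note (not part of the original patch, inferred from the
+	 * expected dump below): both BTF objects are populated with the same
+	 * 22 types, and btf__add_btf() is then expected to append btf2's types
+	 * to btf1 while rewriting their type IDs with a +22 offset — e.g. the
+	 * appended [24] PTR references type_id=23 where the original [2] PTR
+	 * referenced type_id=1.
+	 */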
+ gen_btf(btf2); + + id = btf__add_btf(btf1, btf2); + if (!ASSERT_EQ(id, 23, "id")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] CONST '(anon)' type_id=5", + "[4] VOLATILE '(anon)' type_id=3", + "[5] RESTRICT '(anon)' type_id=4", + "[6] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=10", + "[7] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=32 bitfield_size=16", + "[8] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0 bitfield_size=16", + "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[10] FWD 'struct_fwd' fwd_kind=struct", + "[11] FWD 'union_fwd' fwd_kind=union", + "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", + "[13] TYPEDEF 'typedef1' type_id=1", + "[14] FUNC 'func1' type_id=15 linkage=global", + "[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=2", + "[16] VAR 'var1' type_id=1, linkage=global-alloc", + "[17] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=1 offset=4 size=8", + "[18] DECL_TAG 'tag1' type_id=16 component_idx=-1", + "[19] DECL_TAG 'tag2' type_id=14 component_idx=1", + "[20] TYPE_TAG 'tag1' type_id=1", + "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n" + "\t'v1' val=-1\n" + "\t'v2' val=4886718345", + "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n" + "\t'v1' val=18446744073709551615", + + /* types appended from the second BTF */ + "[23] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[24] PTR '(anon)' type_id=23", + "[25] CONST '(anon)' type_id=27", + "[26] VOLATILE '(anon)' type_id=25", + "[27] RESTRICT '(anon)' type_id=26", + "[28] ARRAY '(anon)' type_id=24 index_type_id=23 nr_elems=10", + "[29] STRUCT 's1' size=8 vlen=2\n" + "\t'f1' type_id=23 bits_offset=0\n" + "\t'f2' type_id=23 bits_offset=32 bitfield_size=16", + "[30] UNION 'u1' size=8 vlen=1\n" + "\t'f1' type_id=23 bits_offset=0 bitfield_size=16", + "[31] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[32] FWD 'struct_fwd' fwd_kind=struct", + "[33] FWD 'union_fwd' fwd_kind=union", + "[34] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", + "[35] TYPEDEF 'typedef1' type_id=23", + "[36] FUNC 'func1' type_id=37 linkage=global", + "[37] FUNC_PROTO '(anon)' ret_type_id=23 vlen=2\n" + "\t'p1' type_id=23\n" + "\t'p2' type_id=24", + "[38] VAR 'var1' type_id=23, linkage=global-alloc", + "[39] DATASEC 'datasec1' size=12 vlen=1\n" + "\ttype_id=23 offset=4 size=8", + "[40] DECL_TAG 'tag1' type_id=38 component_idx=-1", + "[41] DECL_TAG 'tag2' type_id=36 component_idx=1", + "[42] TYPE_TAG 'tag1' type_id=23", + "[43] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n" + "\t'v1' val=-1\n" + "\t'v2' val=4886718345", + "[44] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n" + "\t'v1' val=18446744073709551615"); + +cleanup: + btf__free(btf1); + btf__free(btf2); +} + +void test_btf_write() +{ + if (test__start_subtest("btf_add")) + test_btf_add(); + if (test__start_subtest("btf_add_btf")) + test_btf_add_btf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c new file mode 100644 index 000000000..3bff680de --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "bpf/libbpf.h" +#include <test_progs.h> +#include <network_helpers.h> + +#include "cb_refs.skel.h" + +static char 
log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} cb_refs_tests[] = { + { "underflow_prog", "reference has not been acquired before" }, + { "leak_prog", "Unreleased reference" }, + { "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */ + { "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */ +}; + +void test_cb_refs(void) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct bpf_program *prog; + struct cb_refs *skel; + int i; + + for (i = 0; i < ARRAY_SIZE(cb_refs_tests); i++) { + LIBBPF_OPTS(bpf_test_run_opts, run_opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + skel = cb_refs__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "cb_refs__open_and_load")) + return; + prog = bpf_object__find_program_by_name(skel->obj, cb_refs_tests[i].prog_name); + bpf_program__set_autoload(prog, true); + if (!ASSERT_ERR(cb_refs__load(skel), "cb_refs__load")) + bpf_prog_test_run_opts(bpf_program__fd(prog), &run_opts); + if (!ASSERT_OK_PTR(strstr(log_buf, cb_refs_tests[i].err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", cb_refs_tests[i].err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + cb_refs__destroy(skel); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c new file mode 100644 index 000000000..63ee892bc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. + */ + +#include <test_progs.h> +#include <cgroup_helpers.h> +#include <network_helpers.h> + +#include "progs/cg_storage_multi.h" + +#include "cg_storage_multi_egress_only.skel.h" +#include "cg_storage_multi_isolated.skel.h" +#include "cg_storage_multi_shared.skel.h" + +#define PARENT_CGROUP "/cgroup_storage" +#define CHILD_CGROUP "/cgroup_storage/child" + +static int duration; + +static bool assert_storage(struct bpf_map *map, const void *key, + struct cgroup_value *expected) +{ + struct cgroup_value value; + int map_fd; + + map_fd = bpf_map__fd(map); + + if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) < 0, + "map-lookup", "errno %d", errno)) + return true; + if (CHECK(memcmp(&value, expected, sizeof(struct cgroup_value)), + "assert-storage", "storages differ")) + return true; + + return false; +} + +static bool assert_storage_noexist(struct bpf_map *map, const void *key) +{ + struct cgroup_value value; + int map_fd; + + map_fd = bpf_map__fd(map); + + if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) == 0, + "map-lookup", "succeeded, expected ENOENT")) + return true; + if (CHECK(errno != ENOENT, + "map-lookup", "errno %d, expected ENOENT", errno)) + return true; + + return false; +} + +static bool connect_send(const char *cgroup_path) +{ + int server_fd = -1, client_fd = -1; + char message[] = "message"; + bool res = true; + + if (join_cgroup(cgroup_path)) + goto out_clean; + + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0); + if (server_fd < 0) + goto out_clean; + + client_fd = connect_to_fd(server_fd, 0); + if (client_fd < 0) + goto out_clean; + + if (send(client_fd, &message, sizeof(message), 0) < 0) + goto out_clean; + + if (read(server_fd, &message, sizeof(message)) < 0) + goto out_clean; + + res = false; + +out_clean: + close(client_fd); + close(server_fd); + return res; +} 
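+
+/* Illustrative sketch (not part of the original patch; uses only helpers and
+ * names already declared in this file): the egress-only and isolated variants
+ * below key their cgroup_storage map by struct bpf_cgroup_storage_key, so a
+ * single lookup of the parent cgroup's egress counter looks roughly like:
+ *
+ *	struct bpf_cgroup_storage_key key = {
+ *		.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP),
+ *		.attach_type	 = BPF_CGROUP_INET_EGRESS,
+ *	};
+ *	struct cgroup_value value;
+ *
+ *	bpf_map_lookup_elem(bpf_map__fd(obj->maps.cgroup_storage), &key, &value);
+ *
+ * assert_storage() and assert_storage_noexist() above wrap this lookup and
+ * compare (or expect ENOENT for) the returned struct cgroup_value.
+ */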
+ +static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_egress_only *obj; + struct cgroup_value expected_cgroup_value; + struct bpf_cgroup_storage_key key; + struct bpf_link *parent_link = NULL, *child_link = NULL; + bool err; + + key.attach_type = BPF_CGROUP_INET_EGRESS; + + obj = cg_storage_multi_egress_only__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is only one run and in that run the storage is + * parent cgroup's storage. + * Also assert that child cgroup's storage does not exist + */ + parent_link = bpf_program__attach_cgroup(obj->progs.egress, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_link, "parent-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 1, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there are two additional runs, one that run with parent + * cgroup's storage and one with child cgroup's storage. + */ + child_link = bpf_program__attach_cgroup(obj->progs.egress, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_link, "child-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_link); + bpf_link__destroy(child_link); + + cg_storage_multi_egress_only__destroy(obj); +} + +static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_isolated *obj; + struct cgroup_value expected_cgroup_value; + struct bpf_cgroup_storage_key key; + struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL; + struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL; + struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL; + bool err; + + obj = cg_storage_multi_isolated__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is three runs, two with parent cgroup egress and + * one with parent cgroup ingress, stored in separate parent storages. 
+ * Also assert that child cgroup's storages does not exist + */ + parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach")) + goto close_bpf_object; + parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach")) + goto close_bpf_object; + parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there is six additional runs, parent cgroup egresses and + * ingress, child cgroup egresses and ingress. + * Assert that egree and ingress storages are separate. 
+ */ + child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach")) + goto close_bpf_object; + child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach")) + goto close_bpf_object; + child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 9, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 4 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_egress1_link); + bpf_link__destroy(parent_egress2_link); + bpf_link__destroy(parent_ingress_link); + bpf_link__destroy(child_egress1_link); + bpf_link__destroy(child_egress2_link); + bpf_link__destroy(child_ingress_link); + + cg_storage_multi_isolated__destroy(obj); +} + +static void test_shared(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_shared *obj; + struct cgroup_value expected_cgroup_value; + __u64 key; + struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL; + struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL; + struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL; + bool err; + + obj = cg_storage_multi_shared__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is three runs, two with parent cgroup egress and + * one with parent cgroup ingress. 
+ * Also assert that child cgroup's storage does not exist + */ + parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_egress1_link, "parent-egress1-cg-attach")) + goto close_bpf_object; + parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_egress2_link, "parent-egress2-cg-attach")) + goto close_bpf_object; + parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + parent_cgroup_fd); + if (!ASSERT_OK_PTR(parent_ingress_link, "parent-ingress-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 2, + .ingress_pkts = 1, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key = get_cgroup_id(CHILD_CGROUP); + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there is six additional runs, parent cgroup egresses and + * ingress, child cgroup egresses and ingress. + */ + child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_egress1_link, "child-egress1-cg-attach")) + goto close_bpf_object; + child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_egress2_link, "child-egress2-cg-attach")) + goto close_bpf_object; + child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + child_cgroup_fd); + if (!ASSERT_OK_PTR(child_ingress_link, "child-ingress-cg-attach")) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 9, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 4, + .ingress_pkts = 2, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key = get_cgroup_id(CHILD_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 2, + .ingress_pkts = 1, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_egress1_link); + bpf_link__destroy(parent_egress2_link); + bpf_link__destroy(parent_ingress_link); + bpf_link__destroy(child_egress1_link); + bpf_link__destroy(child_egress2_link); + bpf_link__destroy(child_ingress_link); + + cg_storage_multi_shared__destroy(obj); +} + +void serial_test_cg_storage_multi(void) +{ + int parent_cgroup_fd = -1, child_cgroup_fd = -1; + + parent_cgroup_fd = test__join_cgroup(PARENT_CGROUP); + if (CHECK(parent_cgroup_fd < 0, "cg-create-parent", "errno %d", errno)) + goto close_cgroup_fd; + child_cgroup_fd = create_and_get_cgroup(CHILD_CGROUP); + if (CHECK(child_cgroup_fd < 0, "cg-create-child", "errno %d", errno)) + goto close_cgroup_fd; + + if (test__start_subtest("egress_only")) + test_egress_only(parent_cgroup_fd, child_cgroup_fd); + + if 
(test__start_subtest("isolated")) + test_isolated(parent_cgroup_fd, child_cgroup_fd); + + if (test__start_subtest("shared")) + test_shared(parent_cgroup_fd, child_cgroup_fd); + +close_cgroup_fd: + close(child_cgroup_fd); + close(parent_cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c new file mode 100644 index 000000000..9367bd2f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(void) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = ARRAY_SIZE(prog); + + return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void serial_test_cgroup_attach_autodetach(void) +{ + __u32 duration = 0, prog_cnt = 4, attach_flags; + int allow_prog[2] = {-1}; + __u32 prog_ids[2] = {0}; + void *ptr = NULL; + int cg = 0, i; + int attempts; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load(); + if (CHECK(allow_prog[i] < 0, "prog_load", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + /* create a cgroup, attach two programs and remember their ids */ + cg = create_and_get_cgroup("/cg_autodetach"); + if (CHECK_FAIL(cg < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg_autodetach"))) + goto err; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (CHECK(bpf_prog_attach(allow_prog[i], cg, + BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog_attach", "prog[%d], errno=%d\n", i, errno)) + goto err; + + /* make sure that programs are attached and run some traffic */ + if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags, + prog_ids, &prog_cnt), + "prog_query", "errno=%d\n", errno)) + goto err; + if (CHECK_FAIL(system(PING_CMD))) + goto err; + + /* allocate some memory (4Mb) to pin the original cgroup */ + ptr = malloc(4 * (1 << 20)); + if (CHECK_FAIL(!ptr)) + goto err; + + /* close programs and cgroup fd */ + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + close(allow_prog[i]); + allow_prog[i] = -1; + } + + close(cg); + cg = 0; + + /* leave the cgroup and remove it. don't detach programs */ + cleanup_cgroup_environment(); + + /* wait for the asynchronous auto-detachment. + * wait for no more than 5 sec and give up. 
+ */ + for (i = 0; i < ARRAY_SIZE(prog_ids); i++) { + for (attempts = 5; attempts >= 0; attempts--) { + int fd = bpf_prog_get_fd_by_id(prog_ids[i]); + + if (fd < 0) + break; + + /* don't leave the fd open */ + close(fd); + + if (CHECK_FAIL(!attempts)) + goto err; + + sleep(1); + } + } + +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + if (cg) + close(cg); + free(ptr); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c new file mode 100644 index 000000000..db0b7bac7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int map_fd = -1; + +static int prog_load_cnt(int verdict, int val) +{ + int cgroup_storage_fd, percpu_cgroup_storage_fd; + + if (map_fd < 0) + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); + if (map_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); + if (cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + percpu_cgroup_storage_fd = bpf_map_create( + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL, + sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL); + if (percpu_cgroup_storage_fd < 0) { + printf("failed to create map '%s'\n", strerror(errno)); + return -1; + } + + struct bpf_insn prog[] = { + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */ + BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0), + + BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, val), + BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0), + + BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = ARRAY_SIZE(prog); + int ret; + + ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); + + close(cgroup_storage_fd); + return ret; +} + +void serial_test_cgroup_attach_multi(void) +{ + __u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id; + int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0; + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts); + int allow_prog[7] = {-1}; + unsigned long long value; + __u32 duration = 0; + int i = 0; + + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) { + allow_prog[i] = prog_load_cnt(1, 1 << i); + if (CHECK(allow_prog[i] < 0, "prog_load", + 
"verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + } + + if (CHECK_FAIL(setup_cgroup_environment())) + goto err; + + cg1 = create_and_get_cgroup("/cg1"); + if (CHECK_FAIL(cg1 < 0)) + goto err; + cg2 = create_and_get_cgroup("/cg1/cg2"); + if (CHECK_FAIL(cg2 < 0)) + goto err; + cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); + if (CHECK_FAIL(cg3 < 0)) + goto err; + cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); + if (CHECK_FAIL(cg4 < 0)) + goto err; + cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); + if (CHECK_FAIL(cg5 < 0)) + goto err; + + if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5"))) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog0_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "fail_same_prog_attach_to_cg1", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog1_attach_to_cg1_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog2_attach_to_cg2_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI), + "prog3_attach_to_cg3_multi", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog4_attach_to_cg4_override", "errno=%d\n", errno)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0), + "prog5_attach_to_cg5_none", "errno=%d\n", errno)) + goto err; + + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 32); + + /* query the number of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + /* retrieve prog_ids of effective progs in cg5 */ + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 4); + CHECK_FAIL(attach_flags != 0); + saved_prog_id = prog_ids[0]; + /* check enospc handling */ + prog_ids[0] = 0; + prog_cnt = 2; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt) >= 0); + CHECK_FAIL(errno != ENOSPC); + CHECK_FAIL(prog_cnt != 4); + /* check that prog_ids are returned even when buffer is too small */ + CHECK_FAIL(prog_ids[0] != saved_prog_id); + /* retrieve prog_id of single attached prog in cg5 */ + prog_ids[0] = 0; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 1); + CHECK_FAIL(prog_ids[0] != saved_prog_id); + + /* detach bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS), + "prog_detach_from_cg5", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 1 + 2 + 8 + 16); + + /* test replace */ + + attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE; + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + 
"fail_prog_replace_override", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_REPLACE; + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_multi", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EINVAL); + + attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE; + attach_opts.replace_prog_fd = -1; + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_bad_fd", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != EBADF); + + /* replacing a program that is not attached to cgroup should fail */ + attach_opts.replace_prog_fd = allow_prog[3]; + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "fail_prog_replace_no_ent", "unexpected success\n")) + goto err; + CHECK_FAIL(errno != ENOENT); + + /* replace 1st from the top program */ + attach_opts.replace_prog_fd = allow_prog[0]; + if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "prog_replace", "errno=%d\n", errno)) + goto err; + + /* replace program with itself */ + attach_opts.replace_prog_fd = allow_prog[6]; + if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1, + BPF_CGROUP_INET_EGRESS, &attach_opts), + "prog_replace", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 8 + 16); + + /* detach 3rd from bottom program and ping again */ + if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_from_cg3", "unexpected success\n")) + goto err; + + if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS), + "prog3_detach_from_cg3", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 16); + + /* detach 2nd from bottom program and ping again */ + if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS), + "prog_detach_from_cg4", "errno=%d\n", errno)) + goto err; + + value = 0; + CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0)); + CHECK_FAIL(system(PING_CMD)); + CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value)); + CHECK_FAIL(value != 64 + 2 + 4); + + prog_cnt = 4; + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, &attach_flags, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 3); + CHECK_FAIL(attach_flags != 0); + CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL, + prog_ids, &prog_cnt)); + CHECK_FAIL(prog_cnt != 0); + +err: + for (i = 0; i < ARRAY_SIZE(allow_prog); i++) + if (allow_prog[i] >= 0) + close(allow_prog[i]); + close(cg1); + close(cg2); + close(cg3); + close(cg4); + close(cg5); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c new file mode 100644 index 000000000..9421a5b7f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "cgroup_helpers.h" + +#define FOO "/foo" +#define BAR "/foo/bar/" +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static char 
bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int prog_load(int verdict) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ + BPF_EXIT_INSN(), + }; + size_t insns_cnt = ARRAY_SIZE(prog); + + return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, + prog, insns_cnt, "GPL", 0, + bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +void serial_test_cgroup_attach_override(void) +{ + int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1; + __u32 duration = 0; + + allow_prog = prog_load(1); + if (CHECK(allow_prog < 0, "prog_load_allow", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + drop_prog = prog_load(0); + if (CHECK(drop_prog < 0, "prog_load_drop", + "verifier output:\n%s\n-------\n", bpf_log_buf)) + goto err; + + foo = test__join_cgroup(FOO); + if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_drop_foo_override", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + bar = test__join_cgroup(BAR); + if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n")) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!system(PING_CMD), "ping_fail", + "ping unexpectedly succeeded\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "prog_detach_foo", + "detach prog from %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n")) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "prog_attach_allow_bar_override", + "attach prog to %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + "fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS), + "prog_detach_bar", + "detach prog from %s failed, errno=%d\n", BAR, errno)) + goto err; + + if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS), + "fail_prog_detach_foo", + "double detach from %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_allow_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0), + "fail_prog_attach_allow_bar_none", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_bar_override", + "attach prog to %s unexpectedly succeeded\n", BAR)) + goto err; + + if 
(CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_OVERRIDE), + "fail_prog_attach_allow_foo_override", + "attach prog to %s unexpectedly succeeded\n", FOO)) + goto err; + + if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0), + "prog_attach_drop_foo_none", + "attach prog to %s failed, errno=%d\n", FOO, errno)) + goto err; + +err: + close(foo); + close(bar); + close(allow_prog); + close(drop_prog); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c new file mode 100644 index 000000000..4d2fa9927 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2021 Google LLC. + */ + +#include <test_progs.h> +#include <cgroup_helpers.h> +#include <network_helpers.h> + +#include "cgroup_getset_retval_setsockopt.skel.h" +#include "cgroup_getset_retval_getsockopt.skel.h" +#include "cgroup_getset_retval_hooks.skel.h" + +#define SOL_CUSTOM 0xdeadbeef + +static int zero; + +static void test_setsockopt_set(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, assert that + * we actually get that error when we run setsockopt() + */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL, *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, and one that gets the + * previously set errno. Assert that we get the same errno back. 
+ */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that gets the previously set errno. + * Assert that, without anything setting one, we get 0. + */ + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_OK(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_get_retval = NULL, *link_set_eunatch = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that gets the previously set errno, and then + * one that sets the errno to EUNATCH. Assert that the get does not + * see EUNATCH set later, and does not prevent EUNATCH from being set. 
+ */ + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_get_retval); + bpf_link__destroy(link_set_eunatch); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_override(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL, *link_set_eisconn = NULL; + struct bpf_link *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, then one that sets EISCONN, + * and then one that gets the exported errno. Assert both the syscall + * and the helper sees the last set errno. + */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EISCONN, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EISCONN, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + bpf_link__destroy(link_set_eisconn); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_legacy_eperm = NULL, *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that return a reject without setting errno + * (legacy reject), and one that gets the errno. Assert that for + * backward compatibility the syscall result in EPERM, and this + * is also visible to the helper. 
+ */ + link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm, + cgroup_fd); + if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EPERM, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EPERM, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_legacy_eperm); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL, *link_legacy_eperm = NULL; + struct bpf_link *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, then one that return a reject + * without setting errno, and then one that gets the exported errno. + * Assert both the syscall and the helper's errno are unaffected by + * the second prog (i.e. legacy rejects does not override the errno + * to EPERM). + */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm, + cgroup_fd); + if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + bpf_link__destroy(link_legacy_eperm); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_getsockopt_get(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_get_retval = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach getsockopt that gets previously set errno. Assert that the + * error from kernel is in both ctx_retval_value and retval_value. 
+ */ + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EOPNOTSUPP, "getsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EOPNOTSUPP, "retval_value")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->ctx_retval_value, -EOPNOTSUPP, "ctx_retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_override(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_set_eisconn = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach getsockopt that sets retval to -EISCONN. Assert that this + * overrides the value from kernel. + */ + link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) + goto close_bpf_object; + + if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EISCONN, "getsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eisconn); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_set_eisconn = NULL, *link_clear_retval = NULL; + struct bpf_link *link_get_retval = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach getsockopt that sets retval to -EISCONN, and one that clears + * ctx retval. Assert that the clearing ctx retval is synced to helper + * and clears any errors both from kernel and BPF.. 
+ */ + link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) + goto close_bpf_object; + link_clear_retval = bpf_program__attach_cgroup(obj->progs.clear_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_clear_retval, "cg-attach-clear_retval")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_OK(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->ctx_retval_value, 0, "ctx_retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eisconn); + bpf_link__destroy(link_clear_retval); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +struct exposed_hook { + const char *name; + int expected_err; +} exposed_hooks[] = { + +#define BPF_RETVAL_HOOK(NAME, SECTION, CTX, EXPECTED_ERR) \ + { \ + .name = #NAME, \ + .expected_err = EXPECTED_ERR, \ + }, + +#include "cgroup_getset_retval_hooks.h" + +#undef BPF_RETVAL_HOOK +}; + +static void test_exposed_hooks(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_hooks *skel; + struct bpf_program *prog; + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(exposed_hooks); i++) { + skel = cgroup_getset_retval_hooks__open(); + if (!ASSERT_OK_PTR(skel, "cgroup_getset_retval_hooks__open")) + continue; + + prog = bpf_object__find_program_by_name(skel->obj, exposed_hooks[i].name); + if (!ASSERT_NEQ(prog, NULL, "bpf_object__find_program_by_name")) + goto close_skel; + + err = bpf_program__set_autoload(prog, true); + if (!ASSERT_OK(err, "bpf_program__set_autoload")) + goto close_skel; + + err = cgroup_getset_retval_hooks__load(skel); + ASSERT_EQ(err, exposed_hooks[i].expected_err, "expected_err"); + +close_skel: + cgroup_getset_retval_hooks__destroy(skel); + } +} + +void test_cgroup_getset_retval(void) +{ + int cgroup_fd = -1; + int sock_fd = -1; + + cgroup_fd = test__join_cgroup("/cgroup_getset_retval"); + if (!ASSERT_GE(cgroup_fd, 0, "cg-create")) + goto close_fd; + + sock_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0); + if (!ASSERT_GE(sock_fd, 0, "start-server")) + goto close_fd; + + if (test__start_subtest("setsockopt-set")) + test_setsockopt_set(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-set_and_get")) + test_setsockopt_set_and_get(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-default_zero")) + test_setsockopt_default_zero(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-default_zero_and_set")) + test_setsockopt_default_zero_and_set(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-override")) + test_setsockopt_override(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-legacy_eperm")) + test_setsockopt_legacy_eperm(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-legacy_no_override")) + test_setsockopt_legacy_no_override(cgroup_fd, sock_fd); + + if (test__start_subtest("getsockopt-get")) + test_getsockopt_get(cgroup_fd, sock_fd); + + if (test__start_subtest("getsockopt-override")) + test_getsockopt_override(cgroup_fd, 
sock_fd); + + if (test__start_subtest("getsockopt-retval_sync")) + test_getsockopt_retval_sync(cgroup_fd, sock_fd); + + if (test__start_subtest("exposed_hooks")) + test_exposed_hooks(cgroup_fd, sock_fd); + +close_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c new file mode 100644 index 000000000..3bd27d2ea --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This test makes sure BPF stats collection using rstat works correctly. + * The test uses 3 BPF progs: + * (a) counter: This BPF prog is invoked every time we attach a process to a + * cgroup and locklessly increments a percpu counter. + * The program then calls cgroup_rstat_updated() to inform rstat + * of an update on the (cpu, cgroup) pair. + * + * (b) flusher: This BPF prog is invoked when an rstat flush is ongoing, it + * aggregates all percpu counters to a total counter, and also + * propagates the changes to the ancestor cgroups. + * + * (c) dumper: This BPF prog is a cgroup_iter. It is used to output the total + * counter of a cgroup through reading a file in userspace. + * + * The test sets up a cgroup hierarchy, and the above programs. It spawns a few + * processes in the leaf cgroups and makes sure all the counters are aggregated + * correctly. + * + * Copyright 2022 Google LLC. + */ +#include <asm-generic/errno.h> +#include <errno.h> +#include <sys/types.h> +#include <sys/mount.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <bpf/bpf.h> + +#include "cgroup_helpers.h" +#include "cgroup_hierarchical_stats.skel.h" + +#define PAGE_SIZE 4096 +#define MB(x) (x << 20) + +#define PROCESSES_PER_CGROUP 3 + +#define BPFFS_ROOT "/sys/fs/bpf/" +#define BPFFS_ATTACH_COUNTERS BPFFS_ROOT "attach_counters/" + +#define CG_ROOT_NAME "root" +#define CG_ROOT_ID 1 + +#define CGROUP_PATH(p, n) {.path = p"/"n, .name = n} + +static struct { + const char *path, *name; + unsigned long long id; + int fd; +} cgroups[] = { + CGROUP_PATH("/", "test"), + CGROUP_PATH("/test", "child1"), + CGROUP_PATH("/test", "child2"), + CGROUP_PATH("/test/child1", "child1_1"), + CGROUP_PATH("/test/child1", "child1_2"), + CGROUP_PATH("/test/child2", "child2_1"), + CGROUP_PATH("/test/child2", "child2_2"), +}; + +#define N_CGROUPS ARRAY_SIZE(cgroups) +#define N_NON_LEAF_CGROUPS 3 + +static int root_cgroup_fd; +static bool mounted_bpffs; + +/* reads file at 'path' to 'buf', returns 0 on success. */ +static int read_from_file(const char *path, char *buf, size_t size) +{ + int fd, len; + + fd = open(path, O_RDONLY); + if (fd < 0) + return fd; + + len = read(fd, buf, size); + close(fd); + if (len < 0) + return len; + + buf[len] = 0; + return 0; +} + +/* mounts bpffs and mkdir for reading stats, returns 0 on success. 
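 * If bpffs is already mounted, the mount() below fails with EBUSY; that is
 * tolerated rather than treated as an error, and mounted_bpffs records the
 * outcome so cleanup_bpffs() can decide whether to unmount at the end.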
*/ +static int setup_bpffs(void) +{ + int err; + + /* Mount bpffs */ + err = mount("bpf", BPFFS_ROOT, "bpf", 0, NULL); + mounted_bpffs = !err; + if (ASSERT_FALSE(err && errno != EBUSY, "mount")) + return err; + + /* Create a directory to contain stat files in bpffs */ + err = mkdir(BPFFS_ATTACH_COUNTERS, 0755); + if (!ASSERT_OK(err, "mkdir")) + return err; + + return 0; +} + +static void cleanup_bpffs(void) +{ + /* Remove created directory in bpffs */ + ASSERT_OK(rmdir(BPFFS_ATTACH_COUNTERS), "rmdir "BPFFS_ATTACH_COUNTERS); + + /* Unmount bpffs, if it wasn't already mounted when we started */ + if (mounted_bpffs) + return; + + ASSERT_OK(umount(BPFFS_ROOT), "unmount bpffs"); +} + +/* sets up cgroups, returns 0 on success. */ +static int setup_cgroups(void) +{ + int i, fd, err; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "setup_cgroup_environment")) + return err; + + root_cgroup_fd = get_root_cgroup(); + if (!ASSERT_GE(root_cgroup_fd, 0, "get_root_cgroup")) + return root_cgroup_fd; + + for (i = 0; i < N_CGROUPS; i++) { + fd = create_and_get_cgroup(cgroups[i].path); + if (!ASSERT_GE(fd, 0, "create_and_get_cgroup")) + return fd; + + cgroups[i].fd = fd; + cgroups[i].id = get_cgroup_id(cgroups[i].path); + } + return 0; +} + +static void cleanup_cgroups(void) +{ + close(root_cgroup_fd); + for (int i = 0; i < N_CGROUPS; i++) + close(cgroups[i].fd); + cleanup_cgroup_environment(); +} + +/* Sets up cgroup hiearchary, returns 0 on success. */ +static int setup_hierarchy(void) +{ + return setup_bpffs() || setup_cgroups(); +} + +static void destroy_hierarchy(void) +{ + cleanup_cgroups(); + cleanup_bpffs(); +} + +static int attach_processes(void) +{ + int i, j, status; + + /* In every leaf cgroup, attach 3 processes */ + for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) { + for (j = 0; j < PROCESSES_PER_CGROUP; j++) { + pid_t pid; + + /* Create child and attach to cgroup */ + pid = fork(); + if (pid == 0) { + if (join_parent_cgroup(cgroups[i].path)) + exit(EACCES); + exit(0); + } + + /* Cleanup child */ + waitpid(pid, &status, 0); + if (!ASSERT_TRUE(WIFEXITED(status), "child process exited")) + return 1; + if (!ASSERT_EQ(WEXITSTATUS(status), 0, + "child process exit code")) + return 1; + } + } + return 0; +} + +static unsigned long long +get_attach_counter(unsigned long long cgroup_id, const char *file_name) +{ + unsigned long long attach_counter = 0, id = 0; + static char buf[128], path[128]; + + /* For every cgroup, read the file generated by cgroup_iter */ + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name); + if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter")) + return 0; + + /* Check the output file formatting */ + ASSERT_EQ(sscanf(buf, "cg_id: %llu, attach_counter: %llu\n", + &id, &attach_counter), 2, "output format"); + + /* Check that the cgroup_id is displayed correctly */ + ASSERT_EQ(id, cgroup_id, "cgroup_id"); + /* Check that the counter is non-zero */ + ASSERT_GT(attach_counter, 0, "attach counter non-zero"); + return attach_counter; +} + +static void check_attach_counters(void) +{ + unsigned long long attach_counters[N_CGROUPS], root_attach_counter; + int i; + + for (i = 0; i < N_CGROUPS; i++) + attach_counters[i] = get_attach_counter(cgroups[i].id, + cgroups[i].name); + + /* Read stats for root too */ + root_attach_counter = get_attach_counter(CG_ROOT_ID, CG_ROOT_NAME); + + /* Check that all leafs cgroups have an attach counter of 3 */ + for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) + ASSERT_EQ(attach_counters[i], PROCESSES_PER_CGROUP, + "leaf 
cgroup attach counter"); + + /* Check that child1 == child1_1 + child1_2 */ + ASSERT_EQ(attach_counters[1], attach_counters[3] + attach_counters[4], + "child1_counter"); + /* Check that child2 == child2_1 + child2_2 */ + ASSERT_EQ(attach_counters[2], attach_counters[5] + attach_counters[6], + "child2_counter"); + /* Check that test == child1 + child2 */ + ASSERT_EQ(attach_counters[0], attach_counters[1] + attach_counters[2], + "test_counter"); + /* Check that root >= test */ + ASSERT_GE(root_attach_counter, attach_counters[1], "root_counter"); +} + +/* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure. + */ +static int setup_cgroup_iter(struct cgroup_hierarchical_stats *obj, + int cgroup_fd, const char *file_name) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo = {}; + struct bpf_link *link; + static char path[128]; + int err; + + /* + * Create an iter link, parameterized by cgroup_fd. We only want to + * traverse one cgroup, so set the traversal order to "self". + */ + linfo.cgroup.cgroup_fd = cgroup_fd; + linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(obj->progs.dumper, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return -EFAULT; + + /* Pin the link to a bpffs file */ + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name); + err = bpf_link__pin(link, path); + ASSERT_OK(err, "pin cgroup_iter"); + + /* Remove the link, leaving only the ref held by the pinned file */ + bpf_link__destroy(link); + return err; +} + +/* Sets up programs for collecting stats, returns 0 on success. */ +static int setup_progs(struct cgroup_hierarchical_stats **skel) +{ + int i, err; + + *skel = cgroup_hierarchical_stats__open_and_load(); + if (!ASSERT_OK_PTR(*skel, "open_and_load")) + return 1; + + /* Attach cgroup_iter program that will dump the stats to cgroups */ + for (i = 0; i < N_CGROUPS; i++) { + err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name); + if (!ASSERT_OK(err, "setup_cgroup_iter")) + return err; + } + + /* Also dump stats for root */ + err = setup_cgroup_iter(*skel, root_cgroup_fd, CG_ROOT_NAME); + if (!ASSERT_OK(err, "setup_cgroup_iter")) + return err; + + bpf_program__set_autoattach((*skel)->progs.dumper, false); + err = cgroup_hierarchical_stats__attach(*skel); + if (!ASSERT_OK(err, "attach")) + return err; + + return 0; +} + +static void destroy_progs(struct cgroup_hierarchical_stats *skel) +{ + static char path[128]; + int i; + + for (i = 0; i < N_CGROUPS; i++) { + /* Delete files in bpffs that cgroup_iters are pinned in */ + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, + cgroups[i].name); + ASSERT_OK(remove(path), "remove cgroup_iter pin"); + } + + /* Delete root file in bpffs */ + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, CG_ROOT_NAME); + ASSERT_OK(remove(path), "remove cgroup_iter root pin"); + cgroup_hierarchical_stats__destroy(skel); +} + +void test_cgroup_hierarchical_stats(void) +{ + struct cgroup_hierarchical_stats *skel = NULL; + + if (setup_hierarchy()) + goto hierarchy_cleanup; + if (setup_progs(&skel)) + goto cleanup; + if (attach_processes()) + goto cleanup; + check_attach_counters(); +cleanup: + destroy_progs(skel); +hierarchy_cleanup: + destroy_hierarchy(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c new file mode 100644 index 000000000..c4a2adb38 --- /dev/null +++ 
b/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ + +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <bpf/btf.h> +#include "cgroup_iter.skel.h" +#include "cgroup_helpers.h" + +#define ROOT 0 +#define PARENT 1 +#define CHILD1 2 +#define CHILD2 3 +#define NUM_CGROUPS 4 + +#define PROLOGUE "prologue\n" +#define EPILOGUE "epilogue\n" + +static const char *cg_path[] = { + "/", "/parent", "/parent/child1", "/parent/child2" +}; + +static int cg_fd[] = {-1, -1, -1, -1}; +static unsigned long long cg_id[] = {0, 0, 0, 0}; +static char expected_output[64]; + +static int setup_cgroups(void) +{ + int fd, i = 0; + + for (i = 0; i < NUM_CGROUPS; i++) { + fd = create_and_get_cgroup(cg_path[i]); + if (fd < 0) + return fd; + + cg_fd[i] = fd; + cg_id[i] = get_cgroup_id(cg_path[i]); + } + return 0; +} + +static void cleanup_cgroups(void) +{ + int i; + + for (i = 0; i < NUM_CGROUPS; i++) + close(cg_fd[i]); +} + +static void read_from_cgroup_iter(struct bpf_program *prog, int cgroup_fd, + int order, const char *testname) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + int len, iter_fd; + static char buf[128]; + size_t left; + char *p; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = cgroup_fd; + linfo.cgroup.order = order; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(prog, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (iter_fd < 0) + goto free_link; + + memset(buf, 0, sizeof(buf)); + left = ARRAY_SIZE(buf); + p = buf; + while ((len = read(iter_fd, p, left)) > 0) { + p += len; + left -= len; + } + + ASSERT_STREQ(buf, expected_output, testname); + + /* read() after iter finishes should be ok. */ + if (len == 0) + ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read"); + + close(iter_fd); +free_link: + bpf_link__destroy(link); +} + +/* Invalid cgroup. */ +static void test_invalid_cgroup(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = (__u32)-1; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + ASSERT_ERR_PTR(link, "attach_iter"); + bpf_link__destroy(link); +} + +/* Specifying both cgroup_fd and cgroup_id is invalid. */ +static void test_invalid_cgroup_spec(struct cgroup_iter *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + union bpf_iter_link_info linfo; + struct bpf_link *link; + + memset(&linfo, 0, sizeof(linfo)); + linfo.cgroup.cgroup_fd = (__u32)cg_fd[PARENT]; + linfo.cgroup.cgroup_id = (__u64)cg_id[PARENT]; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + + link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts); + ASSERT_ERR_PTR(link, "attach_iter"); + bpf_link__destroy(link); +} + +/* Preorder walk prints parent and child in order. 
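 * For the hierarchy set up above (/parent with /parent/child1 and
 * /parent/child2), the expected stream is the prologue line, one "%8llu"
 * cgroup id per visited cgroup in pre-order (parent, child1, child2), then
 * the epilogue line, exactly as assembled by the snprintf() below.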
*/ +static void test_walk_preorder(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE, + cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_CGROUP_ITER_DESCENDANTS_PRE, "preorder"); +} + +/* Postorder walk prints child and parent in order. */ +static void test_walk_postorder(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE, + cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_CGROUP_ITER_DESCENDANTS_POST, "postorder"); +} + +/* Walking parents prints parent and then root. */ +static void test_walk_ancestors_up(struct cgroup_iter *skel) +{ + /* terminate the walk when ROOT is met. */ + skel->bss->terminal_cgroup = cg_id[ROOT]; + + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n%8llu\n" EPILOGUE, + cg_id[PARENT], cg_id[ROOT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_CGROUP_ITER_ANCESTORS_UP, "ancestors_up"); + + skel->bss->terminal_cgroup = 0; +} + +/* Early termination prints parent only. */ +static void test_early_termination(struct cgroup_iter *skel) +{ + /* terminate the walk after the first element is processed. */ + skel->bss->terminate_early = 1; + + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_CGROUP_ITER_DESCENDANTS_PRE, "early_termination"); + + skel->bss->terminate_early = 0; +} + +/* Waling self prints self only. */ +static void test_walk_self_only(struct cgroup_iter *skel) +{ + snprintf(expected_output, sizeof(expected_output), + PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]); + + read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT], + BPF_CGROUP_ITER_SELF_ONLY, "self_only"); +} + +void test_cgroup_iter(void) +{ + struct cgroup_iter *skel = NULL; + + if (setup_cgroup_environment()) + return; + + if (setup_cgroups()) + goto out; + + skel = cgroup_iter__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_iter__open_and_load")) + goto out; + + if (test__start_subtest("cgroup_iter__invalid_cgroup")) + test_invalid_cgroup(skel); + if (test__start_subtest("cgroup_iter__invalid_cgroup_spec")) + test_invalid_cgroup_spec(skel); + if (test__start_subtest("cgroup_iter__preorder")) + test_walk_preorder(skel); + if (test__start_subtest("cgroup_iter__postorder")) + test_walk_postorder(skel); + if (test__start_subtest("cgroup_iter__ancestors_up_walk")) + test_walk_ancestors_up(skel); + if (test__start_subtest("cgroup_iter__early_termination")) + test_early_termination(skel); + if (test__start_subtest("cgroup_iter__self_only")) + test_walk_self_only(skel); +out: + cgroup_iter__destroy(skel); + cleanup_cgroups(); + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c new file mode 100644 index 000000000..15093a695 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "testing_helpers.h" +#include "test_cgroup_link.skel.h" + +static __u32 duration = 0; +#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null" + +static struct test_cgroup_link *skel = NULL; + +int 
ping_and_check(int exp_calls, int exp_alt_calls) +{ + skel->bss->calls = 0; + skel->bss->alt_calls = 0; + CHECK_FAIL(system(PING_CMD)); + if (CHECK(skel->bss->calls != exp_calls, "call_cnt", + "exp %d, got %d\n", exp_calls, skel->bss->calls)) + return -EINVAL; + if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt", + "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls)) + return -EINVAL; + return 0; +} + +void serial_test_cgroup_link(void) +{ + struct { + const char *path; + int fd; + } cgs[] = { + { "/cg1" }, + { "/cg1/cg2" }, + { "/cg1/cg2/cg3" }, + { "/cg1/cg2/cg3/cg4" }, + }; + int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts); + struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link; + __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id; + struct bpf_link_info info; + int i = 0, err, prog_fd; + bool detach_legacy = false; + + skel = test_cgroup_link__open_and_load(); + if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n")) + return; + prog_fd = bpf_program__fd(skel->progs.egress); + + err = setup_cgroup_environment(); + if (CHECK(err, "cg_init", "failed: %d\n", err)) + goto cleanup; + + for (i = 0; i < cg_nr; i++) { + cgs[i].fd = create_and_get_cgroup(cgs[i].path); + if (!ASSERT_GE(cgs[i].fd, 0, "cg_create")) + goto cleanup; + } + + err = join_cgroup(cgs[last_cg].path); + if (CHECK(err, "cg_join", "fail: %d\n", err)) + goto cleanup; + + for (i = 0; i < cg_nr; i++) { + links[i] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[i].fd); + if (!ASSERT_OK_PTR(links[i], "cg_attach")) + goto cleanup; + } + + ping_and_check(cg_nr, 0); + + /* query the number of attached progs and attach flags in root cg */ + err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS, + 0, &attach_flags, NULL, &prog_cnt); + CHECK_FAIL(err); + CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI); + if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt)) + goto cleanup; + + /* query the number of effective progs in last cg */ + err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, NULL, + &prog_cnt); + CHECK_FAIL(err); + if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", + cg_nr, prog_cnt)) + goto cleanup; + + /* query the effective prog IDs in last cg */ + err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS, + BPF_F_QUERY_EFFECTIVE, NULL, prog_ids, + &prog_cnt); + CHECK_FAIL(err); + if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n", + cg_nr, prog_cnt)) + goto cleanup; + for (i = 1; i < prog_cnt; i++) { + CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check", + "idx %d, prev id %d, cur id %d\n", + i, prog_ids[i - 1], prog_ids[i]); + } + + /* detach bottom program and ping again */ + bpf_link__destroy(links[last_cg]); + links[last_cg] = NULL; + + ping_and_check(cg_nr - 1, 0); + + /* mix in with non link-based multi-attachments */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI); + if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = true; + + links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (!ASSERT_OK_PTR(links[last_cg], "cg_attach")) + goto cleanup; + + ping_and_check(cg_nr + 1, 0); + + /* detach link */ + bpf_link__destroy(links[last_cg]); + links[last_cg] = NULL; + + /* detach legacy */ + err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + if (CHECK(err, 
"cg_detach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = false; + + /* attach legacy exclusive prog attachment */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, 0); + if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = true; + + /* attempt to mix in with multi-attach bpf_link */ + tmp_link = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) { + bpf_link__destroy(tmp_link); + goto cleanup; + } + + ping_and_check(cg_nr, 0); + + /* detach */ + err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno)) + goto cleanup; + detach_legacy = false; + + ping_and_check(cg_nr - 1, 0); + + /* attach back link-based one */ + links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress, + cgs[last_cg].fd); + if (!ASSERT_OK_PTR(links[last_cg], "cg_attach")) + goto cleanup; + + ping_and_check(cg_nr, 0); + + /* check legacy exclusive prog can't be attached */ + err = bpf_prog_attach(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS, 0); + if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) { + bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS); + goto cleanup; + } + + /* replace BPF programs inside their links for all but first link */ + for (i = 1; i < cg_nr; i++) { + err = bpf_link__update_program(links[i], skel->progs.egress_alt); + if (CHECK(err, "prog_upd", "link #%d\n", i)) + goto cleanup; + } + + ping_and_check(1, cg_nr - 1); + + /* Attempt program update with wrong expected BPF program */ + link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt); + link_upd_opts.flags = BPF_F_REPLACE; + err = bpf_link_update(bpf_link__fd(links[0]), + bpf_program__fd(skel->progs.egress_alt), + &link_upd_opts); + if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1", + "unexpectedly succeeded, err %d, errno %d\n", err, -errno)) + goto cleanup; + + /* Compare-exchange single link program from egress to egress_alt */ + link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress); + link_upd_opts.flags = BPF_F_REPLACE; + err = bpf_link_update(bpf_link__fd(links[0]), + bpf_program__fd(skel->progs.egress_alt), + &link_upd_opts); + if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno)) + goto cleanup; + + /* ping */ + ping_and_check(0, cg_nr); + + /* close cgroup FDs before detaching links */ + for (i = 0; i < cg_nr; i++) { + if (cgs[i].fd > 0) { + close(cgs[i].fd); + cgs[i].fd = -1; + } + } + + /* BPF programs should still get called */ + ping_and_check(0, cg_nr); + + prog_id = link_info_prog_id(links[0], &info); + CHECK(prog_id == 0, "link_info", "failed\n"); + CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id); + + err = bpf_link__detach(links[0]); + if (CHECK(err, "link_detach", "failed %d\n", err)) + goto cleanup; + + /* cgroup_id should be zero in link_info */ + prog_id = link_info_prog_id(links[0], &info); + CHECK(prog_id == 0, "link_info", "failed\n"); + CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id); + + /* First BPF program shouldn't be called anymore */ + ping_and_check(0, cg_nr - 1); + + /* leave cgroup and remove them, don't detach programs */ + cleanup_cgroup_environment(); + + /* BPF programs should have been auto-detached */ + ping_and_check(0, 0); + +cleanup: + if (detach_legacy) + bpf_prog_detach2(prog_fd, cgs[last_cg].fd, + BPF_CGROUP_INET_EGRESS); + + 
for (i = 0; i < cg_nr; i++) { + bpf_link__destroy(links[i]); + } + test_cgroup_link__destroy(skel); + + for (i = 0; i < cg_nr; i++) { + if (cgs[i].fd > 0) + close(cgs[i].fd); + } + cleanup_cgroup_environment(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c new file mode 100644 index 000000000..b9dc4ec65 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <test_progs.h> + +#include "network_helpers.h" +#include "cgroup_skb_sk_lookup_kern.skel.h" + +static void run_lookup_test(__u16 *g_serv_port, int out_sk) +{ + int serv_sk = -1, in_sk = -1, serv_in_sk = -1, err; + struct sockaddr_in6 addr = {}; + socklen_t addr_len = sizeof(addr); + __u32 duration = 0; + + serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) + return; + + err = getsockname(serv_sk, (struct sockaddr *)&addr, &addr_len); + if (CHECK(err, "getsockname", "errno %d\n", errno)) + goto cleanup; + + *g_serv_port = addr.sin6_port; + + /* Client outside of test cgroup should fail to connect by timeout. */ + err = connect_fd_to_fd(out_sk, serv_sk, 1000); + if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd", + "unexpected result err %d errno %d\n", err, errno)) + goto cleanup; + + /* Client inside test cgroup should connect just fine. */ + in_sk = connect_to_fd(serv_sk, 0); + if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno)) + goto cleanup; + + serv_in_sk = accept(serv_sk, NULL, NULL); + if (CHECK(serv_in_sk < 0, "accept", "errno %d\n", errno)) + goto cleanup; + +cleanup: + close(serv_in_sk); + close(in_sk); + close(serv_sk); +} + +static void run_cgroup_bpf_test(const char *cg_path, int out_sk) +{ + struct cgroup_skb_sk_lookup_kern *skel; + struct bpf_link *link; + __u32 duration = 0; + int cgfd = -1; + + skel = cgroup_skb_sk_lookup_kern__open_and_load(); + if (CHECK(!skel, "skel_open_load", "open_load failed\n")) + return; + + cgfd = test__join_cgroup(cg_path); + if (CHECK(cgfd < 0, "cgroup_join", "cgroup setup failed\n")) + goto cleanup; + + link = bpf_program__attach_cgroup(skel->progs.ingress_lookup, cgfd); + if (!ASSERT_OK_PTR(link, "cgroup_attach")) + goto cleanup; + + run_lookup_test(&skel->bss->g_serv_port, out_sk); + + bpf_link__destroy(link); + +cleanup: + close(cgfd); + cgroup_skb_sk_lookup_kern__destroy(skel); +} + +void test_cgroup_skb_sk_lookup(void) +{ + const char *cg_path = "/foo"; + int out_sk; + + /* Create a socket before joining testing cgroup so that its cgroup id + * differs from that of testing cgroup. Moving selftests process to + * testing cgroup won't change cgroup id of an already created socket. 
+ */ + out_sk = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK_FAIL(out_sk < 0)) + return; + + run_cgroup_bpf_test(cg_path, out_sk); + + close(out_sk); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c new file mode 100644 index 000000000..9026b4291 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include "connect4_dropper.skel.h" + +#include "cgroup_helpers.h" +#include "network_helpers.h" + +static int run_test(int cgroup_fd, int server_fd, bool classid) +{ + struct network_helper_opts opts = { + .must_fail = true, + }; + struct connect4_dropper *skel; + int fd, err = 0; + + skel = connect4_dropper__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return -1; + + skel->links.connect_v4_dropper = + bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, + cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) { + err = -1; + goto out; + } + + if (classid && !ASSERT_OK(join_classid(), "join_classid")) { + err = -1; + goto out; + } + + fd = connect_to_fd_opts(server_fd, &opts); + if (fd < 0) + err = -1; + else + close(fd); +out: + connect4_dropper__destroy(skel); + return err; +} + +void test_cgroup_v1v2(void) +{ + struct network_helper_opts opts = {}; + int server_fd, client_fd, cgroup_fd; + static const int port = 60120; + + /* Step 1: Check base connectivity works without any BPF. */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); + if (!ASSERT_GE(server_fd, 0, "server_fd")) + return; + client_fd = connect_to_fd_opts(server_fd, &opts); + if (!ASSERT_GE(client_fd, 0, "client_fd")) { + close(server_fd); + return; + } + close(client_fd); + close(server_fd); + + /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. 
*/ + cgroup_fd = test__join_cgroup("/connect_dropper"); + if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) + return; + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); + if (!ASSERT_GE(server_fd, 0, "server_fd")) { + close(cgroup_fd); + return; + } + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only"); + setup_classid_environment(); + set_classid(42); + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2"); + cleanup_classid_environment(); + close(server_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c new file mode 100644 index 000000000..a756760a4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Jesper Dangaard Brouer */ + +#include <linux/if_link.h> /* before test_progs.h, avoid bpf_util.h redefines */ +#include <test_progs.h> +#include "test_check_mtu.skel.h" +#include "network_helpers.h" + +#include <stdlib.h> +#include <inttypes.h> + +#define IFINDEX_LO 1 + +static __u32 duration; /* Hint: needed for CHECK macro */ + +static int read_mtu_device_lo(void) +{ + const char *filename = "/sys/class/net/lo/mtu"; + char buf[11] = {}; + int value, n, fd; + + fd = open(filename, 0, O_RDONLY); + if (fd == -1) + return -1; + + n = read(fd, buf, sizeof(buf)); + close(fd); + + if (n == -1) + return -2; + + value = strtoimax(buf, NULL, 10); + if (errno == ERANGE) + return -3; + + return value; +} + +static void test_check_mtu_xdp_attach(void) +{ + struct bpf_link_info link_info; + __u32 link_info_len = sizeof(link_info); + struct test_check_mtu *skel; + struct bpf_program *prog; + struct bpf_link *link; + int err = 0; + int fd; + + skel = test_check_mtu__open_and_load(); + if (CHECK(!skel, "open and load skel", "failed")) + return; /* Exit if e.g. 
helper unknown to kernel */ + + prog = skel->progs.xdp_use_helper_basic; + + link = bpf_program__attach_xdp(prog, IFINDEX_LO); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto out; + skel->links.xdp_use_helper_basic = link; + + memset(&link_info, 0, sizeof(link_info)); + fd = bpf_link__fd(link); + err = bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len); + if (CHECK(err, "link_info", "failed: %d\n", err)) + goto out; + + CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type", + "got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP); + CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex", + "got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO); + + err = bpf_link__detach(link); + CHECK(err, "link_detach", "failed %d\n", err); + +out: + test_check_mtu__destroy(skel); +} + +static void test_check_mtu_run_xdp(struct test_check_mtu *skel, + struct bpf_program *prog, + __u32 mtu_expect) +{ + int retval_expect = XDP_PASS; + __u32 mtu_result = 0; + char buf[256] = {}; + int err, prog_fd = bpf_program__fd(prog); + LIBBPF_OPTS(bpf_test_run_opts, topts, + .repeat = 1, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, retval_expect, "retval"); + + /* Extract MTU that BPF-prog got */ + mtu_result = skel->bss->global_bpf_mtu_xdp; + ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user"); +} + + +static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex) +{ + struct test_check_mtu *skel; + int err; + + skel = test_check_mtu__open(); + if (CHECK(!skel, "skel_open", "failed")) + return; + + /* Update "constants" in BPF-prog *BEFORE* libbpf load */ + skel->rodata->GLOBAL_USER_MTU = mtu; + skel->rodata->GLOBAL_USER_IFINDEX = ifindex; + + err = test_check_mtu__load(skel); + if (CHECK(err, "skel_load", "failed: %d\n", err)) + goto cleanup; + + test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu); + test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu); + test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu); + test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu); + test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu); + +cleanup: + test_check_mtu__destroy(skel); +} + +static void test_check_mtu_run_tc(struct test_check_mtu *skel, + struct bpf_program *prog, + __u32 mtu_expect) +{ + int retval_expect = BPF_OK; + __u32 mtu_result = 0; + char buf[256] = {}; + int err, prog_fd = bpf_program__fd(prog); + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, retval_expect, "retval"); + + /* Extract MTU that BPF-prog got */ + mtu_result = skel->bss->global_bpf_mtu_tc; + ASSERT_EQ(mtu_result, mtu_expect, "MTU-compare-user"); +} + + +static void test_check_mtu_tc(__u32 mtu, __u32 ifindex) +{ + struct test_check_mtu *skel; + int err; + + skel = test_check_mtu__open(); + if (CHECK(!skel, "skel_open", "failed")) + return; + + /* Update "constants" in BPF-prog *BEFORE* libbpf load */ + skel->rodata->GLOBAL_USER_MTU = mtu; + skel->rodata->GLOBAL_USER_IFINDEX = ifindex; + + err = test_check_mtu__load(skel); + if (CHECK(err, "skel_load", "failed: %d\n", err)) + goto cleanup; + + test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu); + test_check_mtu_run_tc(skel, 
skel->progs.tc_exceed_mtu, mtu); + test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu); + test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu); + test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu); + test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu); +cleanup: + test_check_mtu__destroy(skel); +} + +void serial_test_check_mtu(void) +{ + int mtu_lo; + + if (test__start_subtest("bpf_check_mtu XDP-attach")) + test_check_mtu_xdp_attach(); + + mtu_lo = read_mtu_device_lo(); + if (CHECK(mtu_lo < 0, "reading MTU value", "failed (err:%d)", mtu_lo)) + return; + + if (test__start_subtest("bpf_check_mtu XDP-run")) + test_check_mtu_xdp(mtu_lo, 0); + + if (test__start_subtest("bpf_check_mtu XDP-run ifindex-lookup")) + test_check_mtu_xdp(mtu_lo, IFINDEX_LO); + + if (test__start_subtest("bpf_check_mtu TC-run")) + test_check_mtu_tc(mtu_lo, 0); + + if (test__start_subtest("bpf_check_mtu TC-run ifindex-lookup")) + test_check_mtu_tc(mtu_lo, IFINDEX_LO); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c new file mode 100644 index 000000000..224f016b0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare + +#define _GNU_SOURCE + +#include <arpa/inet.h> +#include <string.h> + +#include <linux/pkt_cls.h> +#include <netinet/tcp.h> + +#include <test_progs.h> + +#include "progs/test_cls_redirect.h" +#include "test_cls_redirect.skel.h" +#include "test_cls_redirect_subprogs.skel.h" + +#define ENCAP_IP INADDR_LOOPBACK +#define ENCAP_PORT (1234) + +static int duration = 0; + +struct addr_port { + in_port_t port; + union { + struct in_addr in_addr; + struct in6_addr in6_addr; + }; +}; + +struct tuple { + int family; + struct addr_port src; + struct addr_port dst; +}; + +static int start_server(const struct sockaddr *addr, socklen_t len, int type) +{ + int fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + return -1; + if (CHECK_FAIL(bind(fd, addr, len) == -1)) + goto err; + if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1)) + goto err; + + return fd; + +err: + close(fd); + return -1; +} + +static int connect_to_server(const struct sockaddr *addr, socklen_t len, + int type) +{ + int fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + return -1; + if (CHECK_FAIL(connect(fd, addr, len))) + goto err; + + return fd; + +err: + close(fd); + return -1; +} + +static bool fill_addr_port(const struct sockaddr *sa, struct addr_port *ap) +{ + const struct sockaddr_in6 *in6; + const struct sockaddr_in *in; + + switch (sa->sa_family) { + case AF_INET: + in = (const struct sockaddr_in *)sa; + ap->in_addr = in->sin_addr; + ap->port = in->sin_port; + return true; + + case AF_INET6: + in6 = (const struct sockaddr_in6 *)sa; + ap->in6_addr = in6->sin6_addr; + ap->port = in6->sin6_port; + return true; + + default: + return false; + } +} + +static bool set_up_conn(const struct sockaddr *addr, socklen_t len, int type, + int *server, int *conn, struct tuple *tuple) +{ + struct sockaddr_storage ss; + socklen_t slen = sizeof(ss); + struct sockaddr *sa = (struct sockaddr *)&ss; + + *server = start_server(addr, len, type); + if (*server < 0) + return false; + + if (CHECK_FAIL(getsockname(*server, sa, &slen))) + goto close_server; + + *conn = connect_to_server(sa, slen, type); + if (*conn < 0) + goto close_server; + + /* We want to simulate packets arriving at conn, 
so we have to + * swap src and dst. + */ + slen = sizeof(ss); + if (CHECK_FAIL(getsockname(*conn, sa, &slen))) + goto close_conn; + + if (CHECK_FAIL(!fill_addr_port(sa, &tuple->dst))) + goto close_conn; + + slen = sizeof(ss); + if (CHECK_FAIL(getpeername(*conn, sa, &slen))) + goto close_conn; + + if (CHECK_FAIL(!fill_addr_port(sa, &tuple->src))) + goto close_conn; + + tuple->family = ss.ss_family; + return true; + +close_conn: + close(*conn); + *conn = -1; +close_server: + close(*server); + *server = -1; + return false; +} + +static socklen_t prepare_addr(struct sockaddr_storage *addr, int family) +{ + struct sockaddr_in *addr4; + struct sockaddr_in6 *addr6; + + switch (family) { + case AF_INET: + addr4 = (struct sockaddr_in *)addr; + memset(addr4, 0, sizeof(*addr4)); + addr4->sin_family = family; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + return sizeof(*addr4); + case AF_INET6: + addr6 = (struct sockaddr_in6 *)addr; + memset(addr6, 0, sizeof(*addr6)); + addr6->sin6_family = family; + addr6->sin6_addr = in6addr_loopback; + return sizeof(*addr6); + default: + fprintf(stderr, "Invalid family %d", family); + return 0; + } +} + +static bool was_decapsulated(struct bpf_test_run_opts *tattr) +{ + return tattr->data_size_out < tattr->data_size_in; +} + +enum type { + UDP, + TCP, + __NR_KIND, +}; + +enum hops { + NO_HOPS, + ONE_HOP, +}; + +enum flags { + NONE, + SYN, + ACK, +}; + +enum conn { + KNOWN_CONN, + UNKNOWN_CONN, +}; + +enum result { + ACCEPT, + FORWARD, +}; + +struct test_cfg { + enum type type; + enum result result; + enum conn conn; + enum hops hops; + enum flags flags; +}; + +static int test_str(void *buf, size_t len, const struct test_cfg *test, + int family) +{ + const char *family_str, *type, *conn, *hops, *result, *flags; + + family_str = "IPv4"; + if (family == AF_INET6) + family_str = "IPv6"; + + type = "TCP"; + if (test->type == UDP) + type = "UDP"; + + conn = "known"; + if (test->conn == UNKNOWN_CONN) + conn = "unknown"; + + hops = "no hops"; + if (test->hops == ONE_HOP) + hops = "one hop"; + + result = "accept"; + if (test->result == FORWARD) + result = "forward"; + + flags = "none"; + if (test->flags == SYN) + flags = "SYN"; + else if (test->flags == ACK) + flags = "ACK"; + + return snprintf(buf, len, "%s %s %s %s (%s, flags: %s)", family_str, + type, result, conn, hops, flags); +} + +static struct test_cfg tests[] = { + { TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, SYN }, + { TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, ACK }, + { TCP, FORWARD, UNKNOWN_CONN, ONE_HOP, ACK }, + { TCP, ACCEPT, KNOWN_CONN, ONE_HOP, ACK }, + { UDP, ACCEPT, UNKNOWN_CONN, NO_HOPS, NONE }, + { UDP, FORWARD, UNKNOWN_CONN, ONE_HOP, NONE }, + { UDP, ACCEPT, KNOWN_CONN, ONE_HOP, NONE }, +}; + +static void encap_init(encap_headers_t *encap, uint8_t hop_count, uint8_t proto) +{ + const uint8_t hlen = + (sizeof(struct guehdr) / sizeof(uint32_t)) + hop_count; + *encap = (encap_headers_t){ + .eth = { .h_proto = htons(ETH_P_IP) }, + .ip = { + .ihl = 5, + .version = 4, + .ttl = IPDEFTTL, + .protocol = IPPROTO_UDP, + .daddr = htonl(ENCAP_IP) + }, + .udp = { + .dest = htons(ENCAP_PORT), + }, + .gue = { + .hlen = hlen, + .proto_ctype = proto + }, + .unigue = { + .hop_count = hop_count + }, + }; +} + +static size_t build_input(const struct test_cfg *test, void *const buf, + const struct tuple *tuple) +{ + in_port_t sport = tuple->src.port; + encap_headers_t encap; + struct iphdr ip; + struct ipv6hdr ipv6; + struct tcphdr tcp; + struct udphdr udp; + struct in_addr next_hop; + uint8_t *p = buf; + int proto; + + proto = 
IPPROTO_IPIP; + if (tuple->family == AF_INET6) + proto = IPPROTO_IPV6; + + encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto); + p = mempcpy(p, &encap, sizeof(encap)); + + if (test->hops == ONE_HOP) { + next_hop = (struct in_addr){ .s_addr = htonl(0x7f000002) }; + p = mempcpy(p, &next_hop, sizeof(next_hop)); + } + + proto = IPPROTO_TCP; + if (test->type == UDP) + proto = IPPROTO_UDP; + + switch (tuple->family) { + case AF_INET: + ip = (struct iphdr){ + .ihl = 5, + .version = 4, + .ttl = IPDEFTTL, + .protocol = proto, + .saddr = tuple->src.in_addr.s_addr, + .daddr = tuple->dst.in_addr.s_addr, + }; + p = mempcpy(p, &ip, sizeof(ip)); + break; + case AF_INET6: + ipv6 = (struct ipv6hdr){ + .version = 6, + .hop_limit = IPDEFTTL, + .nexthdr = proto, + .saddr = tuple->src.in6_addr, + .daddr = tuple->dst.in6_addr, + }; + p = mempcpy(p, &ipv6, sizeof(ipv6)); + break; + default: + return 0; + } + + if (test->conn == UNKNOWN_CONN) + sport--; + + switch (test->type) { + case TCP: + tcp = (struct tcphdr){ + .source = sport, + .dest = tuple->dst.port, + }; + if (test->flags == SYN) + tcp.syn = true; + if (test->flags == ACK) + tcp.ack = true; + p = mempcpy(p, &tcp, sizeof(tcp)); + break; + case UDP: + udp = (struct udphdr){ + .source = sport, + .dest = tuple->dst.port, + }; + p = mempcpy(p, &udp, sizeof(udp)); + break; + default: + return 0; + } + + return (void *)p - buf; +} + +static void close_fds(int *fds, int n) +{ + int i; + + for (i = 0; i < n; i++) + if (fds[i] > 0) + close(fds[i]); +} + +static void test_cls_redirect_common(struct bpf_program *prog) +{ + LIBBPF_OPTS(bpf_test_run_opts, tattr); + int families[] = { AF_INET, AF_INET6 }; + struct sockaddr_storage ss; + struct sockaddr *addr; + socklen_t slen; + int i, j, err, prog_fd; + int servers[__NR_KIND][ARRAY_SIZE(families)] = {}; + int conns[__NR_KIND][ARRAY_SIZE(families)] = {}; + struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)]; + + addr = (struct sockaddr *)&ss; + for (i = 0; i < ARRAY_SIZE(families); i++) { + slen = prepare_addr(&ss, families[i]); + if (CHECK_FAIL(!slen)) + goto cleanup; + + if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_DGRAM, + &servers[UDP][i], &conns[UDP][i], + &tuples[UDP][i]))) + goto cleanup; + + if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_STREAM, + &servers[TCP][i], &conns[TCP][i], + &tuples[TCP][i]))) + goto cleanup; + } + + prog_fd = bpf_program__fd(prog); + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct test_cfg *test = &tests[i]; + + for (j = 0; j < ARRAY_SIZE(families); j++) { + struct tuple *tuple = &tuples[test->type][j]; + char input[256]; + char tmp[256]; + + test_str(tmp, sizeof(tmp), test, tuple->family); + if (!test__start_subtest(tmp)) + continue; + + tattr.data_out = tmp; + tattr.data_size_out = sizeof(tmp); + + tattr.data_in = input; + tattr.data_size_in = build_input(test, input, tuple); + if (CHECK_FAIL(!tattr.data_size_in)) + continue; + + err = bpf_prog_test_run_opts(prog_fd, &tattr); + if (CHECK_FAIL(err)) + continue; + + if (tattr.retval != TC_ACT_REDIRECT) { + PRINT_FAIL("expected TC_ACT_REDIRECT, got %d\n", + tattr.retval); + continue; + } + + switch (test->result) { + case ACCEPT: + if (CHECK_FAIL(!was_decapsulated(&tattr))) + continue; + break; + case FORWARD: + if (CHECK_FAIL(was_decapsulated(&tattr))) + continue; + break; + default: + PRINT_FAIL("unknown result %d\n", test->result); + continue; + } + } + } + +cleanup: + close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0])); + close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0])); +} + +static void 
test_cls_redirect_inlined(void) +{ + struct test_cls_redirect *skel; + int err; + + skel = test_cls_redirect__open(); + if (CHECK(!skel, "skel_open", "failed\n")) + return; + + skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP); + skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT); + + err = test_cls_redirect__load(skel); + if (CHECK(err, "skel_load", "failed: %d\n", err)) + goto cleanup; + + test_cls_redirect_common(skel->progs.cls_redirect); + +cleanup: + test_cls_redirect__destroy(skel); +} + +static void test_cls_redirect_subprogs(void) +{ + struct test_cls_redirect_subprogs *skel; + int err; + + skel = test_cls_redirect_subprogs__open(); + if (CHECK(!skel, "skel_open", "failed\n")) + return; + + skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP); + skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT); + + err = test_cls_redirect_subprogs__load(skel); + if (CHECK(err, "skel_load", "failed: %d\n", err)) + goto cleanup; + + test_cls_redirect_common(skel->progs.cls_redirect); + +cleanup: + test_cls_redirect_subprogs__destroy(skel); +} + +void test_cls_redirect(void) +{ + if (test__start_subtest("cls_redirect_inlined")) + test_cls_redirect_inlined(); + if (test__start_subtest("cls_redirect_subprogs")) + test_cls_redirect_subprogs(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c new file mode 100644 index 000000000..24d553109 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" + +static int verify_ports(int family, int fd, + __u16 expected_local, __u16 expected_peer) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + __u16 port; + + if (getsockname(fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + return -1; + } + + if (family == AF_INET) + port = ((struct sockaddr_in *)&addr)->sin_port; + else + port = ((struct sockaddr_in6 *)&addr)->sin6_port; + + if (ntohs(port) != expected_local) { + log_err("Unexpected local port %d, expected %d", ntohs(port), + expected_local); + return -1; + } + + if (getpeername(fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get peer addr"); + return -1; + } + + if (family == AF_INET) + port = ((struct sockaddr_in *)&addr)->sin_port; + else + port = ((struct sockaddr_in6 *)&addr)->sin6_port; + + if (ntohs(port) != expected_peer) { + log_err("Unexpected peer port %d, expected %d", ntohs(port), + expected_peer); + return -1; + } + + return 0; +} + +static int run_test(int cgroup_fd, int server_fd, int family, int type) +{ + bool v4 = family == AF_INET; + __u16 expected_local_port = v4 ? 22222 : 22223; + __u16 expected_peer_port = 60000; + struct bpf_program *prog; + struct bpf_object *obj; + const char *obj_file = v4 ? "connect_force_port4.bpf.o" : "connect_force_port6.bpf.o"; + int fd, err; + __u32 duration = 0; + + obj = bpf_object__open_file(obj_file, NULL); + if (!ASSERT_OK_PTR(obj, "bpf_obj_open")) + return -1; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "bpf_obj_load")) { + err = -EIO; + goto close_bpf_object; + } + + prog = bpf_object__find_program_by_name(obj, v4 ? + "connect4" : + "connect6"); + if (CHECK(!prog, "find_prog", "connect prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? 
+ BPF_CGROUP_INET4_CONNECT : + BPF_CGROUP_INET6_CONNECT, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + prog = bpf_object__find_program_by_name(obj, v4 ? + "getpeername4" : + "getpeername6"); + if (CHECK(!prog, "find_prog", "getpeername prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? + BPF_CGROUP_INET4_GETPEERNAME : + BPF_CGROUP_INET6_GETPEERNAME, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + prog = bpf_object__find_program_by_name(obj, v4 ? + "getsockname4" : + "getsockname6"); + if (CHECK(!prog, "find_prog", "getsockname prog not found\n")) { + err = -EIO; + goto close_bpf_object; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, v4 ? + BPF_CGROUP_INET4_GETSOCKNAME : + BPF_CGROUP_INET6_GETSOCKNAME, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + fd = connect_to_fd(server_fd, 0); + if (fd < 0) { + err = -1; + goto close_bpf_object; + } + + err = verify_ports(family, fd, expected_local_port, + expected_peer_port); + close(fd); + +close_bpf_object: + bpf_object__close(obj); + return err; +} + +void test_connect_force_port(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/connect_force_port"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM)); + close(server_fd); + + server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM)); + close(server_fd); + + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM)); + close(server_fd); + + server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM)); + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/connect_ping.c b/tools/testing/selftests/bpf/prog_tests/connect_ping.c new file mode 100644 index 000000000..289218c22 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/connect_ping.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2022 Google LLC. 
+ */ + +#define _GNU_SOURCE +#include <sys/mount.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" + +#include "connect_ping.skel.h" + +/* 2001:db8::1 */ +#define BINDADDR_V6 { { { 0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,1 } } } +static const struct in6_addr bindaddr_v6 = BINDADDR_V6; + +static void subtest(int cgroup_fd, struct connect_ping *skel, + int family, int do_bind) +{ + struct sockaddr_in sa4 = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + struct sockaddr_in6 sa6 = { + .sin6_family = AF_INET6, + .sin6_addr = IN6ADDR_LOOPBACK_INIT, + }; + struct sockaddr *sa; + socklen_t sa_len; + int protocol; + int sock_fd; + + switch (family) { + case AF_INET: + sa = (struct sockaddr *)&sa4; + sa_len = sizeof(sa4); + protocol = IPPROTO_ICMP; + break; + case AF_INET6: + sa = (struct sockaddr *)&sa6; + sa_len = sizeof(sa6); + protocol = IPPROTO_ICMPV6; + break; + } + + memset(skel->bss, 0, sizeof(*skel->bss)); + skel->bss->do_bind = do_bind; + + sock_fd = socket(family, SOCK_DGRAM, protocol); + if (!ASSERT_GE(sock_fd, 0, "sock-create")) + return; + + if (!ASSERT_OK(connect(sock_fd, sa, sa_len), "connect")) + goto close_sock; + + if (!ASSERT_EQ(skel->bss->invocations_v4, family == AF_INET ? 1 : 0, + "invocations_v4")) + goto close_sock; + if (!ASSERT_EQ(skel->bss->invocations_v6, family == AF_INET6 ? 1 : 0, + "invocations_v6")) + goto close_sock; + if (!ASSERT_EQ(skel->bss->has_error, 0, "has_error")) + goto close_sock; + + if (!ASSERT_OK(getsockname(sock_fd, sa, &sa_len), + "getsockname")) + goto close_sock; + + switch (family) { + case AF_INET: + if (!ASSERT_EQ(sa4.sin_family, family, "sin_family")) + goto close_sock; + if (!ASSERT_EQ(sa4.sin_addr.s_addr, + htonl(do_bind ? 0x01010101 : INADDR_LOOPBACK), + "sin_addr")) + goto close_sock; + break; + case AF_INET6: + if (!ASSERT_EQ(sa6.sin6_family, AF_INET6, "sin6_family")) + goto close_sock; + if (!ASSERT_EQ(memcmp(&sa6.sin6_addr, + do_bind ? &bindaddr_v6 : &in6addr_loopback, + sizeof(sa6.sin6_addr)), + 0, "sin6_addr")) + goto close_sock; + break; + } + +close_sock: + close(sock_fd); +} + +void test_connect_ping(void) +{ + struct connect_ping *skel; + int cgroup_fd; + + if (!ASSERT_OK(unshare(CLONE_NEWNET | CLONE_NEWNS), "unshare")) + return; + + /* overmount sysfs, and making original sysfs private so overmount + * does not propagate to other mntns. 
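 * Marking /sys MS_PRIVATE first is effectively "mount --make-private /sys":
 * without it a shared /sys would propagate the sysfs and bpffs overmounts
 * back into the parent mount namespace even after unshare(CLONE_NEWNS).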
+ */ + if (!ASSERT_OK(mount("none", "/sys", NULL, MS_PRIVATE, NULL), + "remount-private-sys")) + return; + if (!ASSERT_OK(mount("sysfs", "/sys", "sysfs", 0, NULL), + "mount-sys")) + return; + if (!ASSERT_OK(mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL), + "mount-bpf")) + goto clean_mount; + + if (!ASSERT_OK(system("ip link set dev lo up"), "lo-up")) + goto clean_mount; + if (!ASSERT_OK(system("ip addr add 1.1.1.1 dev lo"), "lo-addr-v4")) + goto clean_mount; + if (!ASSERT_OK(system("ip -6 addr add 2001:db8::1 dev lo"), "lo-addr-v6")) + goto clean_mount; + if (write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0")) + goto clean_mount; + + cgroup_fd = test__join_cgroup("/connect_ping"); + if (!ASSERT_GE(cgroup_fd, 0, "cg-create")) + goto clean_mount; + + skel = connect_ping__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel-load")) + goto close_cgroup; + skel->links.connect_v4_prog = + bpf_program__attach_cgroup(skel->progs.connect_v4_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.connect_v4_prog, "cg-attach-v4")) + goto skel_destroy; + skel->links.connect_v6_prog = + bpf_program__attach_cgroup(skel->progs.connect_v6_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.connect_v6_prog, "cg-attach-v6")) + goto skel_destroy; + + /* Connect a v4 ping socket to localhost, assert that only v4 is called, + * and called exactly once, and that the socket's bound address is + * original loopback address. + */ + if (test__start_subtest("ipv4")) + subtest(cgroup_fd, skel, AF_INET, 0); + + /* Connect a v4 ping socket to localhost, assert that only v4 is called, + * and called exactly once, and that the socket's bound address is + * address we explicitly bound. + */ + if (test__start_subtest("ipv4-bind")) + subtest(cgroup_fd, skel, AF_INET, 1); + + /* Connect a v6 ping socket to localhost, assert that only v6 is called, + * and called exactly once, and that the socket's bound address is + * original loopback address. + */ + if (test__start_subtest("ipv6")) + subtest(cgroup_fd, skel, AF_INET6, 0); + + /* Connect a v6 ping socket to localhost, assert that only v6 is called, + * and called exactly once, and that the socket's bound address is + * address we explicitly bound. 
+ */ + if (test__start_subtest("ipv6-bind")) + subtest(cgroup_fd, skel, AF_INET6, 1); + +skel_destroy: + connect_ping__destroy(skel); + +close_cgroup: + close(cgroup_fd); + +clean_mount: + umount2("/sys", MNT_DETACH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c new file mode 100644 index 000000000..f2ce4fd1c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <bpf/btf.h> + +/* real layout and sizes according to test's (32-bit) BTF + * needs to be defined before skeleton is included */ +struct test_struct___real { + unsigned int ptr; /* can't use `void *`, it is always 8 byte in BPF target */ + unsigned int val2; + unsigned long long val1; + unsigned short val3; + unsigned char val4; + unsigned char _pad; +}; + +#include "test_core_autosize.skel.h" + +static int duration = 0; + +static struct { + unsigned long long ptr_samesized; + unsigned long long val1_samesized; + unsigned long long val2_samesized; + unsigned long long val3_samesized; + unsigned long long val4_samesized; + struct test_struct___real output_samesized; + + unsigned long long ptr_downsized; + unsigned long long val1_downsized; + unsigned long long val2_downsized; + unsigned long long val3_downsized; + unsigned long long val4_downsized; + struct test_struct___real output_downsized; + + unsigned long long ptr_probed; + unsigned long long val1_probed; + unsigned long long val2_probed; + unsigned long long val3_probed; + unsigned long long val4_probed; + + unsigned long long ptr_signed; + unsigned long long val1_signed; + unsigned long long val2_signed; + unsigned long long val3_signed; + unsigned long long val4_signed; + struct test_struct___real output_signed; +} out; + +void test_core_autosize(void) +{ + char btf_file[] = "/tmp/core_autosize.btf.XXXXXX"; + int err, fd = -1, zero = 0; + int char_id, short_id, int_id, long_long_id, void_ptr_id, id; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); + struct test_core_autosize* skel = NULL; + struct bpf_program *prog; + struct bpf_map *bss_map; + struct btf *btf = NULL; + size_t written; + const void *raw_data; + __u32 raw_sz; + FILE *f = NULL; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + return; + /* Emit the following struct with 32-bit pointer size: + * + * struct test_struct { + * void *ptr; + * unsigned long val2; + * unsigned long long val1; + * unsigned short val3; + * unsigned char val4; + * char: 8; + * }; + * + * This struct is going to be used as the "kernel BTF" for this test. + * It's equivalent memory-layout-wise to test_struct__real above. 
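 * With 4-byte pointers the 20-byte layout works out to:
 *   ptr  at byte  0 (4 bytes)
 *   val2 at byte  4 (4 bytes)
 *   val1 at byte  8 (8 bytes)
 *   val3 at byte 16 (2 bytes)
 *   val4 at byte 18 (1 byte), plus one byte of tail padding
 * matching the bit offsets (0, 32, 64, 128, 144) passed to btf__add_field()
 * below.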
+ */ + + /* force 32-bit pointer size */ + btf__set_pointer_size(btf, 4); + + char_id = btf__add_int(btf, "unsigned char", 1, 0); + ASSERT_EQ(char_id, 1, "char_id"); + short_id = btf__add_int(btf, "unsigned short", 2, 0); + ASSERT_EQ(short_id, 2, "short_id"); + /* "long unsigned int" of 4 byte size tells BTF that sizeof(void *) == 4 */ + int_id = btf__add_int(btf, "long unsigned int", 4, 0); + ASSERT_EQ(int_id, 3, "int_id"); + long_long_id = btf__add_int(btf, "unsigned long long", 8, 0); + ASSERT_EQ(long_long_id, 4, "long_long_id"); + void_ptr_id = btf__add_ptr(btf, 0); + ASSERT_EQ(void_ptr_id, 5, "void_ptr_id"); + + id = btf__add_struct(btf, "test_struct", 20 /* bytes */); + ASSERT_EQ(id, 6, "struct_id"); + err = btf__add_field(btf, "ptr", void_ptr_id, 0, 0); + err = err ?: btf__add_field(btf, "val2", int_id, 32, 0); + err = err ?: btf__add_field(btf, "val1", long_long_id, 64, 0); + err = err ?: btf__add_field(btf, "val3", short_id, 128, 0); + err = err ?: btf__add_field(btf, "val4", char_id, 144, 0); + ASSERT_OK(err, "struct_fields"); + + fd = mkstemp(btf_file); + if (CHECK(fd < 0, "btf_tmp", "failed to create file: %d\n", fd)) + goto cleanup; + f = fdopen(fd, "w"); + if (!ASSERT_OK_PTR(f, "btf_fdopen")) + goto cleanup; + + raw_data = btf__raw_data(btf, &raw_sz); + if (!ASSERT_OK_PTR(raw_data, "raw_data")) + goto cleanup; + written = fwrite(raw_data, 1, raw_sz, f); + if (CHECK(written != raw_sz, "btf_write", "written: %zu, errno: %d\n", written, errno)) + goto cleanup; + fflush(f); + fclose(f); + f = NULL; + close(fd); + fd = -1; + + /* open and load BPF program with custom BTF as the kernel BTF */ + open_opts.btf_custom_path = btf_file; + skel = test_core_autosize__open_opts(&open_opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* disable handle_signed() for now */ + prog = bpf_object__find_program_by_name(skel->obj, "handle_signed"); + if (!ASSERT_OK_PTR(prog, "prog_find")) + goto cleanup; + bpf_program__set_autoload(prog, false); + + err = bpf_object__load(skel->obj); + if (!ASSERT_OK(err, "prog_load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, "handle_samesize"); + if (!ASSERT_OK_PTR(prog, "prog_find")) + goto cleanup; + skel->links.handle_samesize = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(skel->links.handle_samesize, "prog_attach")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, "handle_downsize"); + if (!ASSERT_OK_PTR(prog, "prog_find")) + goto cleanup; + skel->links.handle_downsize = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(skel->links.handle_downsize, "prog_attach")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, "handle_probed"); + if (!ASSERT_OK_PTR(prog, "prog_find")) + goto cleanup; + skel->links.handle_probed = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(skel->links.handle_probed, "prog_attach")) + goto cleanup; + + usleep(1); + + bss_map = bpf_object__find_map_by_name(skel->obj, ".bss"); + if (!ASSERT_OK_PTR(bss_map, "bss_map_find")) + goto cleanup; + + err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0); + if (!ASSERT_OK(err, "bss_lookup")) + goto cleanup; + + ASSERT_EQ(out.ptr_samesized, 0x01020304, "ptr_samesized"); + ASSERT_EQ(out.val1_samesized, 0x1020304050607080, "val1_samesized"); + ASSERT_EQ(out.val2_samesized, 0x0a0b0c0d, "val2_samesized"); + ASSERT_EQ(out.val3_samesized, 0xfeed, "val3_samesized"); + ASSERT_EQ(out.val4_samesized, 0xb9, "val4_samesized"); + ASSERT_EQ(out.output_samesized.ptr, 0x01020304, "ptr_samesized"); + 
ASSERT_EQ(out.output_samesized.val1, 0x1020304050607080, "val1_samesized"); + ASSERT_EQ(out.output_samesized.val2, 0x0a0b0c0d, "val2_samesized"); + ASSERT_EQ(out.output_samesized.val3, 0xfeed, "val3_samesized"); + ASSERT_EQ(out.output_samesized.val4, 0xb9, "val4_samesized"); + + ASSERT_EQ(out.ptr_downsized, 0x01020304, "ptr_downsized"); + ASSERT_EQ(out.val1_downsized, 0x1020304050607080, "val1_downsized"); + ASSERT_EQ(out.val2_downsized, 0x0a0b0c0d, "val2_downsized"); + ASSERT_EQ(out.val3_downsized, 0xfeed, "val3_downsized"); + ASSERT_EQ(out.val4_downsized, 0xb9, "val4_downsized"); + ASSERT_EQ(out.output_downsized.ptr, 0x01020304, "ptr_downsized"); + ASSERT_EQ(out.output_downsized.val1, 0x1020304050607080, "val1_downsized"); + ASSERT_EQ(out.output_downsized.val2, 0x0a0b0c0d, "val2_downsized"); + ASSERT_EQ(out.output_downsized.val3, 0xfeed, "val3_downsized"); + ASSERT_EQ(out.output_downsized.val4, 0xb9, "val4_downsized"); + + ASSERT_EQ(out.ptr_probed, 0x01020304, "ptr_probed"); + ASSERT_EQ(out.val1_probed, 0x1020304050607080, "val1_probed"); + ASSERT_EQ(out.val2_probed, 0x0a0b0c0d, "val2_probed"); + ASSERT_EQ(out.val3_probed, 0xfeed, "val3_probed"); + ASSERT_EQ(out.val4_probed, 0xb9, "val4_probed"); + + test_core_autosize__destroy(skel); + skel = NULL; + + /* now re-load with handle_signed() enabled, it should fail loading */ + open_opts.btf_custom_path = btf_file; + skel = test_core_autosize__open_opts(&open_opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + err = test_core_autosize__load(skel); + if (!ASSERT_ERR(err, "skel_load")) + goto cleanup; + +cleanup: + if (f) + fclose(f); + if (fd >= 0) + close(fd); + remove(btf_file); + btf__free(btf); + test_core_autosize__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c new file mode 100644 index 000000000..63a51e9f3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/utsname.h> +#include <linux/version.h> +#include "test_core_extern.skel.h" + +static uint32_t get_kernel_version(void) +{ + uint32_t major, minor, patch; + struct utsname info; + + uname(&info); + if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3) + return 0; + return KERNEL_VERSION(major, minor, patch); +} + +#define CFG "CONFIG_BPF_SYSCALL=n\n" + +static struct test_case { + const char *name; + const char *cfg; + bool fails; + struct test_core_extern__data data; +} test_cases[] = { + { .name = "default search path", .data = { .bpf_syscall = true } }, + { + .name = "custom values", + .cfg = "CONFIG_BPF_SYSCALL=n\n" + "CONFIG_TRISTATE=m\n" + "CONFIG_BOOL=y\n" + "CONFIG_CHAR=100\n" + "CONFIG_USHORT=30000\n" + "CONFIG_INT=123456\n" + "CONFIG_ULONG=0xDEADBEEFC0DE\n" + "CONFIG_STR=\"abracad\"\n" + "CONFIG_MISSING=0", + .data = { + .unkn_virt_val = 0, + .bpf_syscall = false, + .tristate_val = TRI_MODULE, + .bool_val = true, + .char_val = 100, + .ushort_val = 30000, + .int_val = 123456, + .ulong_val = 0xDEADBEEFC0DE, + .str_val = "abracad", + }, + }, + /* TRISTATE */ + { .name = "tristate (y)", .cfg = CFG"CONFIG_TRISTATE=y\n", + .data = { .tristate_val = TRI_YES } }, + { .name = "tristate (n)", .cfg = CFG"CONFIG_TRISTATE=n\n", + .data = { .tristate_val = TRI_NO } }, + { .name = "tristate (m)", .cfg = CFG"CONFIG_TRISTATE=m\n", + .data = { .tristate_val = TRI_MODULE } }, + { .name = "tristate 
(int)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=1" }, + { .name = "tristate (bad)", .fails = 1, .cfg = CFG"CONFIG_TRISTATE=M" }, + /* BOOL */ + { .name = "bool (y)", .cfg = CFG"CONFIG_BOOL=y\n", + .data = { .bool_val = true } }, + { .name = "bool (n)", .cfg = CFG"CONFIG_BOOL=n\n", + .data = { .bool_val = false } }, + { .name = "bool (tristate)", .fails = 1, .cfg = CFG"CONFIG_BOOL=m" }, + { .name = "bool (int)", .fails = 1, .cfg = CFG"CONFIG_BOOL=1" }, + /* CHAR */ + { .name = "char (tristate)", .cfg = CFG"CONFIG_CHAR=m\n", + .data = { .char_val = 'm' } }, + { .name = "char (bad)", .fails = 1, .cfg = CFG"CONFIG_CHAR=q\n" }, + { .name = "char (empty)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\n" }, + { .name = "char (str)", .fails = 1, .cfg = CFG"CONFIG_CHAR=\"y\"\n" }, + /* STRING */ + { .name = "str (empty)", .cfg = CFG"CONFIG_STR=\"\"\n", + .data = { .str_val = "\0\0\0\0\0\0\0" } }, + { .name = "str (padded)", .cfg = CFG"CONFIG_STR=\"abra\"\n", + .data = { .str_val = "abra\0\0\0" } }, + { .name = "str (too long)", .cfg = CFG"CONFIG_STR=\"abracada\"\n", + .data = { .str_val = "abracad" } }, + { .name = "str (no value)", .fails = 1, .cfg = CFG"CONFIG_STR=\n" }, + { .name = "str (bad value)", .fails = 1, .cfg = CFG"CONFIG_STR=bla\n" }, + /* INTEGERS */ + { + .name = "integer forms", + .cfg = CFG + "CONFIG_CHAR=0xA\n" + "CONFIG_USHORT=0462\n" + "CONFIG_INT=-100\n" + "CONFIG_ULONG=+1000000000000", + .data = { + .char_val = 0xA, + .ushort_val = 0462, + .int_val = -100, + .ulong_val = 1000000000000, + }, + }, + { .name = "int (bad)", .fails = 1, .cfg = CFG"CONFIG_INT=abc" }, + { .name = "int (str)", .fails = 1, .cfg = CFG"CONFIG_INT=\"abc\"" }, + { .name = "int (empty)", .fails = 1, .cfg = CFG"CONFIG_INT=" }, + { .name = "int (mixed)", .fails = 1, .cfg = CFG"CONFIG_INT=123abc" }, + { .name = "int (max)", .cfg = CFG"CONFIG_INT=2147483647", + .data = { .int_val = 2147483647 } }, + { .name = "int (min)", .cfg = CFG"CONFIG_INT=-2147483648", + .data = { .int_val = -2147483648 } }, + { .name = "int (max+1)", .fails = 1, .cfg = CFG"CONFIG_INT=2147483648" }, + { .name = "int (min-1)", .fails = 1, .cfg = CFG"CONFIG_INT=-2147483649" }, + { .name = "ushort (max)", .cfg = CFG"CONFIG_USHORT=65535", + .data = { .ushort_val = 65535 } }, + { .name = "ushort (min)", .cfg = CFG"CONFIG_USHORT=0", + .data = { .ushort_val = 0 } }, + { .name = "ushort (max+1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=65536" }, + { .name = "ushort (min-1)", .fails = 1, .cfg = CFG"CONFIG_USHORT=-1" }, + { .name = "u64 (max)", .cfg = CFG"CONFIG_ULONG=0xffffffffffffffff", + .data = { .ulong_val = 0xffffffffffffffff } }, + { .name = "u64 (min)", .cfg = CFG"CONFIG_ULONG=0", + .data = { .ulong_val = 0 } }, + { .name = "u64 (max+1)", .fails = 1, .cfg = CFG"CONFIG_ULONG=0x10000000000000000" }, +}; + +void test_core_extern(void) +{ + const uint32_t kern_ver = get_kernel_version(); + int err, i, j; + struct test_core_extern *skel = NULL; + uint64_t *got, *exp; + int n = sizeof(*skel->data) / sizeof(uint64_t); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + struct test_case *t = &test_cases[i]; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .kconfig = t->cfg, + ); + + if (!test__start_subtest(t->name)) + continue; + + skel = test_core_extern__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + err = test_core_extern__load(skel); + if (t->fails) { + ASSERT_ERR(err, "skel_load_should_fail"); + goto cleanup; + } else if (!ASSERT_OK(err, "skel_load")) { + goto cleanup; + } + err = test_core_extern__attach(skel); + if 
(!ASSERT_OK(err, "attach_raw_tp")) + goto cleanup; + + usleep(1); + + t->data.kern_ver = kern_ver; + t->data.missing_val = 0xDEADC0DE; + got = (uint64_t *)skel->data; + exp = (uint64_t *)&t->data; + for (j = 0; j < n; j++) { + ASSERT_EQ(got[j], exp[j], "result"); + } +cleanup: + test_core_extern__destroy(skel); + skel = NULL; + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c new file mode 100644 index 000000000..6a5a1c019 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "test_progs.h" +#include "core_kern.lskel.h" + +void test_core_kern_lskel(void) +{ + struct core_kern_lskel *skel; + int link_fd; + + skel = core_kern_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + link_fd = core_kern_lskel__core_relo_proto__attach(skel); + if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)")) + goto cleanup; + + /* trigger tracepoints */ + usleep(1); + ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists"); + ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists"); + ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. nested"); + +cleanup: + core_kern_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c new file mode 100644 index 000000000..04cc145bc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "test_progs.h" +#include "core_kern_overflow.lskel.h" + +void test_core_kern_overflow_lskel(void) +{ + struct core_kern_overflow_lskel *skel; + + skel = core_kern_overflow_lskel__open_and_load(); + if (!ASSERT_NULL(skel, "open_and_load")) + core_kern_overflow_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_read_macros.c b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c new file mode 100644 index 000000000..96f5cf3c6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> + +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *head); +}; + +/* ___shuffled flavor is just an illusion for BPF code, it doesn't really + * exist and user-space needs to provide data in the memory layout that + * matches callback_head. 
We just defined ___shuffled flavor to make it easier + * to work with the skeleton + */ +struct callback_head___shuffled { + struct callback_head___shuffled *next; + void (*func)(struct callback_head *head); +}; + +#include "test_core_read_macros.skel.h" + +void test_core_read_macros(void) +{ + int duration = 0, err; + struct test_core_read_macros* skel; + struct test_core_read_macros__bss *bss; + struct callback_head u_probe_in; + struct callback_head___shuffled u_core_in; + + skel = test_core_read_macros__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + bss->my_pid = getpid(); + + /* next pointers have to be set from the kernel side */ + bss->k_probe_in.func = (void *)(long)0x1234; + bss->k_core_in.func = (void *)(long)0xabcd; + + u_probe_in.next = &u_probe_in; + u_probe_in.func = (void *)(long)0x5678; + bss->u_probe_in = &u_probe_in; + + u_core_in.next = &u_core_in; + u_core_in.func = (void *)(long)0xdbca; + bss->u_core_in = &u_core_in; + + err = test_core_read_macros__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + ASSERT_EQ(bss->k_probe_out, 0x1234, "k_probe_out"); + ASSERT_EQ(bss->k_core_out, 0xabcd, "k_core_out"); + + ASSERT_EQ(bss->u_probe_out, 0x5678, "u_probe_out"); + ASSERT_EQ(bss->u_core_out, 0xdbca, "u_core_out"); + +cleanup: + test_core_read_macros__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c new file mode 100644 index 000000000..47f42e680 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -0,0 +1,1155 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "progs/core_reloc_types.h" +#include "bpf_testmod/bpf_testmod.h" +#include <linux/limits.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <bpf/btf.h> + +static int duration = 0; + +#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name) + +#define MODULES_CASE(name, pg_name, tp_name) { \ + .case_name = name, \ + .bpf_obj_file = "test_core_reloc_module.bpf.o", \ + .btf_src_file = NULL, /* find in kernel module BTFs */ \ + .input = "", \ + .input_len = 0, \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_module_output) { \ + .read_ctx_sz = sizeof(struct bpf_testmod_test_read_ctx),\ + .read_ctx_exists = true, \ + .buf_exists = true, \ + .len_exists = true, \ + .off_exists = true, \ + .len = 123, \ + .off = 0, \ + .comm = "test_progs", \ + .comm_len = sizeof("test_progs"), \ + }, \ + .output_len = sizeof(struct core_reloc_module_output), \ + .prog_name = pg_name, \ + .raw_tp_name = tp_name, \ + .trigger = __trigger_module_test_read, \ + .needs_testmod = true, \ +} + +#define FLAVORS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = 42, \ + .b = 0xc001, \ + .c = 0xbeef, \ +} + +#define FLAVORS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_flavors.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_flavors" \ + +#define FLAVORS_CASE(name) { \ + FLAVORS_CASE_COMMON(name), \ + .input = FLAVORS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = FLAVORS_DATA(core_reloc_flavors), \ + .output_len = sizeof(struct core_reloc_flavors), \ +} + +#define FLAVORS_ERR_CASE(name) { \ + FLAVORS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define NESTING_DATA(struct_name) 
STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = { .a = { .a = 42 } }, \ + .b = { .b = { .b = 0xc001 } }, \ +} + +#define NESTING_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_nesting.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_nesting" \ + +#define NESTING_CASE(name) { \ + NESTING_CASE_COMMON(name), \ + .input = NESTING_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = NESTING_DATA(core_reloc_nesting), \ + .output_len = sizeof(struct core_reloc_nesting) \ +} + +#define NESTING_ERR_CASE(name) { \ + NESTING_CASE_COMMON(name), \ + .fails = true, \ + .run_btfgen_fails = true, \ +} + +#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = { [2] = 1 }, \ + .b = { [1] = { [2] = { [3] = 2 } } }, \ + .c = { [1] = { .c = 3 } }, \ + .d = { [0] = { [0] = { .d = 4 } } }, \ +} + +#define ARRAYS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_arrays.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_arrays" \ + +#define ARRAYS_CASE(name) { \ + ARRAYS_CASE_COMMON(name), \ + .input = ARRAYS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \ + .a2 = 1, \ + .b123 = 2, \ + .c1c = 3, \ + .d00d = 4, \ + .f10c = 0, \ + }, \ + .output_len = sizeof(struct core_reloc_arrays_output) \ +} + +#define ARRAYS_ERR_CASE(name) { \ + ARRAYS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define PRIMITIVES_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .a = 1, \ + .b = 2, \ + .c = 3, \ + .d = (void *)4, \ + .f = (void *)5, \ +} + +#define PRIMITIVES_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_primitives.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_primitives" \ + +#define PRIMITIVES_CASE(name) { \ + PRIMITIVES_CASE_COMMON(name), \ + .input = PRIMITIVES_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = PRIMITIVES_DATA(core_reloc_primitives), \ + .output_len = sizeof(struct core_reloc_primitives), \ +} + +#define PRIMITIVES_ERR_CASE(name) { \ + PRIMITIVES_CASE_COMMON(name), \ + .fails = true, \ +} + +#define MODS_CASE(name) { \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_mods.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \ + .a = 1, \ + .b = 2, \ + .c = (void *)3, \ + .d = (void *)4, \ + .e = { [2] = 5 }, \ + .f = { [1] = 6 }, \ + .g = { .x = 7 }, \ + .h = { .y = 8 }, \ + }, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_mods_output) { \ + .a = 1, .b = 2, .c = 3, .d = 4, \ + .e = 5, .f = 6, .g = 7, .h = 8, \ + }, \ + .output_len = sizeof(struct core_reloc_mods_output), \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_mods", \ +} + +#define PTR_AS_ARR_CASE(name) { \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_ptr_as_arr.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .input = (const char *)&(struct core_reloc_##name []){ \ + { .a = 1 }, \ + { .a = 2 }, \ + { .a = 3 }, \ + }, \ + .input_len = 3 * sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_ptr_as_arr) { \ + .a = 3, \ + }, \ + .output_len = sizeof(struct 
core_reloc_ptr_as_arr), \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_ptr_as_arr", \ +} + +#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ + .u8_field = 1, \ + .s8_field = 2, \ + .u16_field = 3, \ + .s16_field = 4, \ + .u32_field = 5, \ + .s32_field = 6, \ + .u64_field = 7, \ + .s64_field = 8, \ +} + +#define INTS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_ints.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_ints" + +#define INTS_CASE(name) { \ + INTS_CASE_COMMON(name), \ + .input = INTS_DATA(core_reloc_##name), \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = INTS_DATA(core_reloc_ints), \ + .output_len = sizeof(struct core_reloc_ints), \ +} + +#define INTS_ERR_CASE(name) { \ + INTS_CASE_COMMON(name), \ + .fails = true, \ +} + +#define FIELD_EXISTS_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_existence.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_existence" + +#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \ + .case_name = test_name_prefix#name, \ + .bpf_obj_file = objfile, \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o" + +#define BITFIELDS_CASE(name, ...) { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \ + "probed:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_bitfields", \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \ + "direct:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ + .prog_name = "test_core_bitfields_direct", \ +} + + +#define BITFIELDS_ERR_CASE(name) { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.bpf.o", \ + "probed:", name), \ + .fails = true, \ + .run_btfgen_fails = true, \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_bitfields", \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.bpf.o", \ + "direct:", name), \ + .fails = true, \ + .run_btfgen_fails = true, \ + .prog_name = "test_core_bitfields_direct", \ +} + +#define SIZE_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_size.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_size" + +#define SIZE_OUTPUT_DATA(type) \ + STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ + .int_sz = sizeof(((type *)0)->int_field), \ + .int_off = offsetof(type, int_field), \ + .struct_sz = sizeof(((type *)0)->struct_field), \ + .struct_off = offsetof(type, struct_field), \ + .union_sz = sizeof(((type *)0)->union_field), \ + .union_off = offsetof(type, union_field), \ + .arr_sz = sizeof(((type *)0)->arr_field), \ + .arr_off = offsetof(type, arr_field), \ + .arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \ + .arr_elem_off = offsetof(type, arr_field[1]), \ + .ptr_sz = 8, /* always 8-byte pointer for BPF */ \ + .ptr_off = offsetof(type, ptr_field), \ + .enum_sz = 
sizeof(((type *)0)->enum_field), \ + .enum_off = offsetof(type, enum_field), \ + .float_sz = sizeof(((type *)0)->float_field), \ + .float_off = offsetof(type, float_field), \ + } + +#define SIZE_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .input_len = 0, \ + .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \ + .output_len = sizeof(struct core_reloc_size_output), \ +} + +#define SIZE_ERR_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .fails = true, \ + .run_btfgen_fails = true, \ +} + +#define TYPE_BASED_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_type_based.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_type_based" + +#define TYPE_BASED_CASE(name, ...) { \ + TYPE_BASED_CASE_COMMON(name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_type_based_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_type_based_output), \ +} + +#define TYPE_BASED_ERR_CASE(name) { \ + TYPE_BASED_CASE_COMMON(name), \ + .fails = true, \ +} + +#define TYPE_ID_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_type_id.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_type_id" + +#define TYPE_ID_CASE(name, setup_fn) { \ + TYPE_ID_CASE_COMMON(name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_type_id_output) {}, \ + .output_len = sizeof(struct core_reloc_type_id_output), \ + .setup = setup_fn, \ +} + +#define TYPE_ID_ERR_CASE(name) { \ + TYPE_ID_CASE_COMMON(name), \ + .fails = true, \ +} + +#define ENUMVAL_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_enumval.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_enumval" + +#define ENUMVAL_CASE(name, ...) { \ + ENUMVAL_CASE_COMMON(name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_enumval_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_enumval_output), \ +} + +#define ENUMVAL_ERR_CASE(name) { \ + ENUMVAL_CASE_COMMON(name), \ + .fails = true, \ +} + +#define ENUM64VAL_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_enum64val.bpf.o", \ + .btf_src_file = "btf__core_reloc_" #name ".bpf.o", \ + .raw_tp_name = "sys_enter", \ + .prog_name = "test_core_enum64val" + +#define ENUM64VAL_CASE(name, ...) 
{ \ + ENUM64VAL_CASE_COMMON(name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_enum64val_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_enum64val_output), \ +} + +#define ENUM64VAL_ERR_CASE(name) { \ + ENUM64VAL_CASE_COMMON(name), \ + .fails = true, \ +} + +struct core_reloc_test_case; + +typedef int (*setup_test_fn)(struct core_reloc_test_case *test); +typedef int (*trigger_test_fn)(const struct core_reloc_test_case *test); + +struct core_reloc_test_case { + const char *case_name; + const char *bpf_obj_file; + const char *btf_src_file; + const char *input; + int input_len; + const char *output; + int output_len; + bool fails; + bool run_btfgen_fails; + bool needs_testmod; + bool relaxed_core_relocs; + const char *prog_name; + const char *raw_tp_name; + setup_test_fn setup; + trigger_test_fn trigger; +}; + +static int find_btf_type(const struct btf *btf, const char *name, __u32 kind) +{ + int id; + + id = btf__find_by_name_kind(btf, name, kind); + if (CHECK(id <= 0, "find_type_id", "failed to find '%s', kind %d: %d\n", name, kind, id)) + return -1; + + return id; +} + +static int setup_type_id_case_local(struct core_reloc_test_case *test) +{ + struct core_reloc_type_id_output *exp = (void *)test->output; + struct btf *local_btf = btf__parse(test->bpf_obj_file, NULL); + struct btf *targ_btf = btf__parse(test->btf_src_file, NULL); + const struct btf_type *t; + const char *name; + int i; + + if (!ASSERT_OK_PTR(local_btf, "local_btf") || !ASSERT_OK_PTR(targ_btf, "targ_btf")) { + btf__free(local_btf); + btf__free(targ_btf); + return -EINVAL; + } + + exp->local_anon_struct = -1; + exp->local_anon_union = -1; + exp->local_anon_enum = -1; + exp->local_anon_func_proto_ptr = -1; + exp->local_anon_void_ptr = -1; + exp->local_anon_arr = -1; + + for (i = 1; i < btf__type_cnt(local_btf); i++) + { + t = btf__type_by_id(local_btf, i); + /* we are interested only in anonymous types */ + if (t->name_off) + continue; + + if (btf_is_struct(t) && btf_vlen(t) && + (name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) && + strcmp(name, "marker_field") == 0) { + exp->local_anon_struct = i; + } else if (btf_is_union(t) && btf_vlen(t) && + (name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) && + strcmp(name, "marker_field") == 0) { + exp->local_anon_union = i; + } else if (btf_is_enum(t) && btf_vlen(t) && + (name = btf__name_by_offset(local_btf, btf_enum(t)[0].name_off)) && + strcmp(name, "MARKER_ENUM_VAL") == 0) { + exp->local_anon_enum = i; + } else if (btf_is_ptr(t) && (t = btf__type_by_id(local_btf, t->type))) { + if (btf_is_func_proto(t) && (t = btf__type_by_id(local_btf, t->type)) && + btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) && + strcmp(name, "_Bool") == 0) { + /* ptr -> func_proto -> _Bool */ + exp->local_anon_func_proto_ptr = i; + } else if (btf_is_void(t)) { + /* ptr -> void */ + exp->local_anon_void_ptr = i; + } + } else if (btf_is_array(t) && (t = btf__type_by_id(local_btf, btf_array(t)->type)) && + btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) && + strcmp(name, "_Bool") == 0) { + /* _Bool[] */ + exp->local_anon_arr = i; + } + } + + exp->local_struct = find_btf_type(local_btf, "a_struct", BTF_KIND_STRUCT); + exp->local_union = find_btf_type(local_btf, "a_union", BTF_KIND_UNION); + exp->local_enum = find_btf_type(local_btf, "an_enum", BTF_KIND_ENUM); + exp->local_int = find_btf_type(local_btf, "int", BTF_KIND_INT); + exp->local_struct_typedef = find_btf_type(local_btf, "named_struct_typedef", 
BTF_KIND_TYPEDEF); + exp->local_func_proto_typedef = find_btf_type(local_btf, "func_proto_typedef", BTF_KIND_TYPEDEF); + exp->local_arr_typedef = find_btf_type(local_btf, "arr_typedef", BTF_KIND_TYPEDEF); + + btf__free(local_btf); + btf__free(targ_btf); + return 0; +} + +static int setup_type_id_case_success(struct core_reloc_test_case *test) { + struct core_reloc_type_id_output *exp = (void *)test->output; + struct btf *targ_btf; + int err; + + err = setup_type_id_case_local(test); + if (err) + return err; + + targ_btf = btf__parse(test->btf_src_file, NULL); + + exp->targ_struct = find_btf_type(targ_btf, "a_struct", BTF_KIND_STRUCT); + exp->targ_union = find_btf_type(targ_btf, "a_union", BTF_KIND_UNION); + exp->targ_enum = find_btf_type(targ_btf, "an_enum", BTF_KIND_ENUM); + exp->targ_int = find_btf_type(targ_btf, "int", BTF_KIND_INT); + exp->targ_struct_typedef = find_btf_type(targ_btf, "named_struct_typedef", BTF_KIND_TYPEDEF); + exp->targ_func_proto_typedef = find_btf_type(targ_btf, "func_proto_typedef", BTF_KIND_TYPEDEF); + exp->targ_arr_typedef = find_btf_type(targ_btf, "arr_typedef", BTF_KIND_TYPEDEF); + + btf__free(targ_btf); + return 0; +} + +static int setup_type_id_case_failure(struct core_reloc_test_case *test) +{ + struct core_reloc_type_id_output *exp = (void *)test->output; + int err; + + err = setup_type_id_case_local(test); + if (err) + return err; + + exp->targ_struct = 0; + exp->targ_union = 0; + exp->targ_enum = 0; + exp->targ_int = 0; + exp->targ_struct_typedef = 0; + exp->targ_func_proto_typedef = 0; + exp->targ_arr_typedef = 0; + + return 0; +} + +static int __trigger_module_test_read(const struct core_reloc_test_case *test) +{ + struct core_reloc_module_output *exp = (void *)test->output; + + trigger_module_test_read(exp->len); + return 0; +} + +static const struct core_reloc_test_case test_cases[] = { + /* validate we can find kernel image and use its BTF for relocs */ + { + .case_name = "kernel", + .bpf_obj_file = "test_core_reloc_kernel.bpf.o", + .btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */ + .input = "", + .input_len = 0, + .output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) { + .valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, + .comm = "test_progs", + .comm_len = sizeof("test_progs"), + .local_task_struct_matches = true, + }, + .output_len = sizeof(struct core_reloc_kernel_output), + .raw_tp_name = "sys_enter", + .prog_name = "test_core_kernel", + }, + + /* validate we can find kernel module BTF types for relocs/attach */ + MODULES_CASE("module_probed", "test_core_module_probed", "bpf_testmod_test_read"), + MODULES_CASE("module_direct", "test_core_module_direct", NULL), + + /* validate BPF program can use multiple flavors to match against + * single target BTF type + */ + FLAVORS_CASE(flavors), + + FLAVORS_ERR_CASE(flavors__err_wrong_name), + + /* various struct/enum nesting and resolution scenarios */ + NESTING_CASE(nesting), + NESTING_CASE(nesting___anon_embed), + NESTING_CASE(nesting___struct_union_mixup), + NESTING_CASE(nesting___extra_nesting), + NESTING_CASE(nesting___dup_compat_types), + + NESTING_ERR_CASE(nesting___err_missing_field), + NESTING_ERR_CASE(nesting___err_array_field), + NESTING_ERR_CASE(nesting___err_missing_container), + NESTING_ERR_CASE(nesting___err_nonstruct_container), + NESTING_ERR_CASE(nesting___err_array_container), + NESTING_ERR_CASE(nesting___err_dup_incompat_types), + NESTING_ERR_CASE(nesting___err_partial_match_dups), + NESTING_ERR_CASE(nesting___err_too_deep), + + /* various array access relocation scenarios */ 
+ ARRAYS_CASE(arrays), + ARRAYS_CASE(arrays___diff_arr_dim), + ARRAYS_CASE(arrays___diff_arr_val_sz), + ARRAYS_CASE(arrays___equiv_zero_sz_arr), + ARRAYS_CASE(arrays___fixed_arr), + + ARRAYS_ERR_CASE(arrays___err_too_small), + ARRAYS_ERR_CASE(arrays___err_too_shallow), + ARRAYS_ERR_CASE(arrays___err_non_array), + ARRAYS_ERR_CASE(arrays___err_wrong_val_type), + ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr), + + /* enum/ptr/int handling scenarios */ + PRIMITIVES_CASE(primitives), + PRIMITIVES_CASE(primitives___diff_enum_def), + PRIMITIVES_CASE(primitives___diff_func_proto), + PRIMITIVES_CASE(primitives___diff_ptr_type), + + PRIMITIVES_ERR_CASE(primitives___err_non_enum), + PRIMITIVES_ERR_CASE(primitives___err_non_int), + PRIMITIVES_ERR_CASE(primitives___err_non_ptr), + + /* const/volatile/restrict and typedefs scenarios */ + MODS_CASE(mods), + MODS_CASE(mods___mod_swap), + MODS_CASE(mods___typedefs), + + /* handling "ptr is an array" semantics */ + PTR_AS_ARR_CASE(ptr_as_arr), + PTR_AS_ARR_CASE(ptr_as_arr___diff_sz), + + /* int signedness/sizing/bitfield handling */ + INTS_CASE(ints), + INTS_CASE(ints___bool), + INTS_CASE(ints___reverse_sign), + + /* validate edge cases of capturing relocations */ + { + .case_name = "misc", + .bpf_obj_file = "test_core_reloc_misc.bpf.o", + .btf_src_file = "btf__core_reloc_misc.bpf.o", + .input = (const char *)&(struct core_reloc_misc_extensible[]){ + { .a = 1 }, + { .a = 2 }, /* not read */ + { .a = 3 }, + }, + .input_len = 4 * sizeof(int), + .output = STRUCT_TO_CHAR_PTR(core_reloc_misc_output) { + .a = 1, + .b = 1, + .c = 0, /* BUG in clang, should be 3 */ + }, + .output_len = sizeof(struct core_reloc_misc_output), + .raw_tp_name = "sys_enter", + .prog_name = "test_core_misc", + }, + + /* validate field existence checks */ + { + FIELD_EXISTS_CASE_COMMON(existence), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence) { + .a = 1, + .b = 2, + .c = 3, + .arr = { 4 }, + .s = { .x = 5 }, + }, + .input_len = sizeof(struct core_reloc_existence), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 1, + .b_exists = 1, + .c_exists = 1, + .arr_exists = 1, + .s_exists = 1, + .a_value = 1, + .b_value = 2, + .c_value = 3, + .arr_value = 4, + .s_value = 5, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, + { + FIELD_EXISTS_CASE_COMMON(existence___minimal), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) { + .a = 42, + }, + .input_len = sizeof(struct core_reloc_existence___minimal), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 1, + .b_exists = 0, + .c_exists = 0, + .arr_exists = 0, + .s_exists = 0, + .a_value = 42, + .b_value = 0xff000002u, + .c_value = 0xff000003u, + .arr_value = 0xff000004u, + .s_value = 0xff000005u, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, + { + FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs), + .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) { + }, + .input_len = sizeof(struct core_reloc_existence___wrong_field_defs), + .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) { + .a_exists = 0, + .b_exists = 0, + .c_exists = 0, + .arr_exists = 0, + .s_exists = 0, + .a_value = 0xff000001u, + .b_value = 0xff000002u, + .c_value = 0xff000003u, + .arr_value = 0xff000004u, + .s_value = 0xff000005u, + }, + .output_len = sizeof(struct core_reloc_existence_output), + }, + + /* bitfield relocation checks */ + BITFIELDS_CASE(bitfields, { + .ub1 = 1, + .ub2 = 2, + .ub7 = 96, + .sb4 = -7, + .sb20 = -0x76543, + .u32 = 
0x80000000, + .s32 = -0x76543210, + }), + BITFIELDS_CASE(bitfields___bit_sz_change, { + .ub1 = 6, + .ub2 = 0xABCDE, + .ub7 = 1, + .sb4 = -1, + .sb20 = -0x17654321, + .u32 = 0xBEEF, + .s32 = -0x3FEDCBA987654321LL, + }), + BITFIELDS_CASE(bitfields___bitfield_vs_int, { + .ub1 = 0xFEDCBA9876543210LL, + .ub2 = 0xA6, + .ub7 = -0x7EDCBA987654321LL, + .sb4 = -0x6123456789ABCDELL, + .sb20 = 0xD00DLL, + .u32 = -0x76543, + .s32 = 0x0ADEADBEEFBADB0BLL, + }), + BITFIELDS_CASE(bitfields___just_big_enough, { + .ub1 = 0xFLL, + .ub2 = 0x0812345678FEDCBALL, + }), + BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), + + /* field size and offset relocation checks */ + SIZE_CASE(size), + SIZE_CASE(size___diff_sz), + SIZE_CASE(size___diff_offs), + SIZE_ERR_CASE(size___err_ambiguous), + + /* validate type existence, match, and size relocations */ + TYPE_BASED_CASE(type_based, { + .struct_exists = 1, + .complex_struct_exists = 1, + .union_exists = 1, + .enum_exists = 1, + .typedef_named_struct_exists = 1, + .typedef_anon_struct_exists = 1, + .typedef_struct_ptr_exists = 1, + .typedef_int_exists = 1, + .typedef_enum_exists = 1, + .typedef_void_ptr_exists = 1, + .typedef_restrict_ptr_exists = 1, + .typedef_func_proto_exists = 1, + .typedef_arr_exists = 1, + + .struct_matches = 1, + .complex_struct_matches = 1, + .union_matches = 1, + .enum_matches = 1, + .typedef_named_struct_matches = 1, + .typedef_anon_struct_matches = 1, + .typedef_struct_ptr_matches = 1, + .typedef_int_matches = 1, + .typedef_enum_matches = 1, + .typedef_void_ptr_matches = 1, + .typedef_restrict_ptr_matches = 1, + .typedef_func_proto_matches = 1, + .typedef_arr_matches = 1, + + .struct_sz = sizeof(struct a_struct), + .union_sz = sizeof(union a_union), + .enum_sz = sizeof(enum an_enum), + .typedef_named_struct_sz = sizeof(named_struct_typedef), + .typedef_anon_struct_sz = sizeof(anon_struct_typedef), + .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef), + .typedef_int_sz = sizeof(int_typedef), + .typedef_enum_sz = sizeof(enum_typedef), + .typedef_void_ptr_sz = sizeof(void_ptr_typedef), + .typedef_func_proto_sz = sizeof(func_proto_typedef), + .typedef_arr_sz = sizeof(arr_typedef), + }), + TYPE_BASED_CASE(type_based___all_missing, { + /* all zeros */ + }), + TYPE_BASED_CASE(type_based___diff, { + .struct_exists = 1, + .complex_struct_exists = 1, + .union_exists = 1, + .enum_exists = 1, + .typedef_named_struct_exists = 1, + .typedef_anon_struct_exists = 1, + .typedef_struct_ptr_exists = 1, + .typedef_int_exists = 1, + .typedef_enum_exists = 1, + .typedef_void_ptr_exists = 1, + .typedef_func_proto_exists = 1, + .typedef_arr_exists = 1, + + .struct_matches = 1, + .complex_struct_matches = 1, + .union_matches = 1, + .enum_matches = 1, + .typedef_named_struct_matches = 1, + .typedef_anon_struct_matches = 1, + .typedef_struct_ptr_matches = 1, + .typedef_int_matches = 0, + .typedef_enum_matches = 1, + .typedef_void_ptr_matches = 1, + .typedef_func_proto_matches = 0, + .typedef_arr_matches = 0, + + .struct_sz = sizeof(struct a_struct___diff), + .union_sz = sizeof(union a_union___diff), + .enum_sz = sizeof(enum an_enum___diff), + .typedef_named_struct_sz = sizeof(named_struct_typedef___diff), + .typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff), + .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff), + .typedef_int_sz = sizeof(int_typedef___diff), + .typedef_enum_sz = sizeof(enum_typedef___diff), + .typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff), + .typedef_func_proto_sz = sizeof(func_proto_typedef___diff), + .typedef_arr_sz = 
sizeof(arr_typedef___diff), + }), + TYPE_BASED_CASE(type_based___diff_sz, { + .struct_exists = 1, + .union_exists = 1, + .enum_exists = 1, + .typedef_named_struct_exists = 1, + .typedef_anon_struct_exists = 1, + .typedef_struct_ptr_exists = 1, + .typedef_int_exists = 1, + .typedef_enum_exists = 1, + .typedef_void_ptr_exists = 1, + .typedef_func_proto_exists = 1, + .typedef_arr_exists = 1, + + .struct_matches = 0, + .union_matches = 0, + .enum_matches = 0, + .typedef_named_struct_matches = 0, + .typedef_anon_struct_matches = 0, + .typedef_struct_ptr_matches = 1, + .typedef_int_matches = 0, + .typedef_enum_matches = 0, + .typedef_void_ptr_matches = 1, + .typedef_func_proto_matches = 0, + .typedef_arr_matches = 0, + + .struct_sz = sizeof(struct a_struct___diff_sz), + .union_sz = sizeof(union a_union___diff_sz), + .enum_sz = sizeof(enum an_enum___diff_sz), + .typedef_named_struct_sz = sizeof(named_struct_typedef___diff_sz), + .typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff_sz), + .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff_sz), + .typedef_int_sz = sizeof(int_typedef___diff_sz), + .typedef_enum_sz = sizeof(enum_typedef___diff_sz), + .typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff_sz), + .typedef_func_proto_sz = sizeof(func_proto_typedef___diff_sz), + .typedef_arr_sz = sizeof(arr_typedef___diff_sz), + }), + TYPE_BASED_CASE(type_based___incompat, { + .enum_exists = 1, + .enum_matches = 1, + .enum_sz = sizeof(enum an_enum), + }), + TYPE_BASED_CASE(type_based___fn_wrong_args, { + .struct_exists = 1, + .struct_matches = 1, + .struct_sz = sizeof(struct a_struct), + }), + + /* BTF_TYPE_ID_LOCAL/BTF_TYPE_ID_TARGET tests */ + TYPE_ID_CASE(type_id, setup_type_id_case_success), + TYPE_ID_CASE(type_id___missing_targets, setup_type_id_case_failure), + + /* Enumerator value existence and value relocations */ + ENUMVAL_CASE(enumval, { + .named_val1_exists = true, + .named_val2_exists = true, + .named_val3_exists = true, + .anon_val1_exists = true, + .anon_val2_exists = true, + .anon_val3_exists = true, + .named_val1 = 1, + .named_val2 = 2, + .anon_val1 = 0x10, + .anon_val2 = 0x20, + }), + ENUMVAL_CASE(enumval___diff, { + .named_val1_exists = true, + .named_val2_exists = true, + .named_val3_exists = true, + .anon_val1_exists = true, + .anon_val2_exists = true, + .anon_val3_exists = true, + .named_val1 = 101, + .named_val2 = 202, + .anon_val1 = 0x11, + .anon_val2 = 0x22, + }), + ENUMVAL_CASE(enumval___val3_missing, { + .named_val1_exists = true, + .named_val2_exists = true, + .named_val3_exists = false, + .anon_val1_exists = true, + .anon_val2_exists = true, + .anon_val3_exists = false, + .named_val1 = 111, + .named_val2 = 222, + .anon_val1 = 0x111, + .anon_val2 = 0x222, + }), + ENUMVAL_ERR_CASE(enumval___err_missing), + + /* 64bit enumerator value existence and value relocations */ + ENUM64VAL_CASE(enum64val, { + .unsigned_val1_exists = true, + .unsigned_val2_exists = true, + .unsigned_val3_exists = true, + .signed_val1_exists = true, + .signed_val2_exists = true, + .signed_val3_exists = true, + .unsigned_val1 = 0x1ffffffffULL, + .unsigned_val2 = 0x2, + .signed_val1 = 0x1ffffffffLL, + .signed_val2 = -2, + }), + ENUM64VAL_CASE(enum64val___diff, { + .unsigned_val1_exists = true, + .unsigned_val2_exists = true, + .unsigned_val3_exists = true, + .signed_val1_exists = true, + .signed_val2_exists = true, + .signed_val3_exists = true, + .unsigned_val1 = 0x101ffffffffULL, + .unsigned_val2 = 0x202ffffffffULL, + .signed_val1 = -101, + .signed_val2 = -202, + }), + 
ENUM64VAL_CASE(enum64val___val3_missing, { + .unsigned_val1_exists = true, + .unsigned_val2_exists = true, + .unsigned_val3_exists = false, + .signed_val1_exists = true, + .signed_val2_exists = true, + .signed_val3_exists = false, + .unsigned_val1 = 0x111ffffffffULL, + .unsigned_val2 = 0x222, + .signed_val1 = 0x111ffffffffLL, + .signed_val2 = -222, + }), + ENUM64VAL_ERR_CASE(enum64val___err_missing), +}; + +struct data { + char in[256]; + char out[256]; + bool skip; + uint64_t my_pid_tgid; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objpath) +{ + char command[4096]; + int n; + + n = snprintf(command, sizeof(command), + "./bpftool gen min_core_btf %s %s %s", + src_btf, dst_btf, objpath); + if (n < 0 || n >= sizeof(command)) + return -1; + + return system(command); +} + +static void run_core_reloc_tests(bool use_btfgen) +{ + const size_t mmap_sz = roundup_page(sizeof(struct data)); + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); + struct core_reloc_test_case *test_case, test_case_copy; + const char *tp_name, *probe_name; + int err, i, equal, fd; + struct bpf_link *link = NULL; + struct bpf_map *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + uint64_t my_pid_tgid; + struct data *data; + void *mmap_data = NULL; + + my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32); + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; + + test_case_copy = test_cases[i]; + test_case = &test_case_copy; + + if (!test__start_subtest(test_case->case_name)) + continue; + + if (test_case->needs_testmod && !env.has_testmod) { + test__skip(); + continue; + } + + /* generate a "minimal" BTF file and use it as source */ + if (use_btfgen) { + + if (!test_case->btf_src_file || test_case->run_btfgen_fails) { + test__skip(); + continue; + } + + fd = mkstemp(btf_file); + if (!ASSERT_GE(fd, 0, "btf_tmp")) + continue; + close(fd); /* we only need the path */ + err = run_btfgen(test_case->btf_src_file, btf_file, + test_case->bpf_obj_file); + if (!ASSERT_OK(err, "run_btfgen")) + continue; + + test_case->btf_src_file = btf_file; + } + + if (test_case->setup) { + err = test_case->setup(test_case); + if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err)) + continue; + } + + if (test_case->btf_src_file) { + err = access(test_case->btf_src_file, R_OK); + if (!ASSERT_OK(err, "btf_src_file")) + continue; + } + + open_opts.btf_custom_path = test_case->btf_src_file; + obj = bpf_object__open_file(test_case->bpf_obj_file, &open_opts); + if (!ASSERT_OK_PTR(obj, "obj_open")) + goto cleanup; + + probe_name = test_case->prog_name; + tp_name = test_case->raw_tp_name; /* NULL for tp_btf */ + prog = bpf_object__find_program_by_name(obj, probe_name); + if (CHECK(!prog, "find_probe", + "prog '%s' not found\n", probe_name)) + goto cleanup; + + err = bpf_object__load(obj); + if (err) { + if (!test_case->fails) + ASSERT_OK(err, "obj_load"); + goto cleanup; + } + + data_map = bpf_object__find_map_by_name(obj, ".bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto cleanup; + + mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_SHARED, bpf_map__fd(data_map), 0); + if (CHECK(mmap_data == MAP_FAILED, "mmap", + ".bss mmap failed: %d", errno)) { + mmap_data = NULL; + goto cleanup; + } + data = mmap_data; + + memset(mmap_data, 0, sizeof(*data)); 
+ if (test_case->input_len) + memcpy(data->in, test_case->input, test_case->input_len); + data->my_pid_tgid = my_pid_tgid; + + link = bpf_program__attach_raw_tracepoint(prog, tp_name); + if (!ASSERT_OK_PTR(link, "attach_raw_tp")) + goto cleanup; + + /* trigger test run */ + if (test_case->trigger) { + if (!ASSERT_OK(test_case->trigger(test_case), "test_trigger")) + goto cleanup; + } else { + usleep(1); + } + + if (data->skip) { + test__skip(); + goto cleanup; + } + + if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail")) + goto cleanup; + + equal = memcmp(data->out, test_case->output, + test_case->output_len) == 0; + if (CHECK(!equal, "check_result", + "input/output data don't match\n")) { + int j; + + for (j = 0; j < test_case->input_len; j++) { + printf("input byte #%d: 0x%02hhx\n", + j, test_case->input[j]); + } + for (j = 0; j < test_case->output_len; j++) { + printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n", + j, test_case->output[j], data->out[j]); + } + goto cleanup; + } + +cleanup: + if (mmap_data) { + CHECK_FAIL(munmap(mmap_data, mmap_sz)); + mmap_data = NULL; + } + if (use_btfgen) + remove(test_case->btf_src_file); + bpf_link__destroy(link); + link = NULL; + bpf_object__close(obj); + } +} + +void test_core_reloc(void) +{ + run_core_reloc_tests(false); +} + +void test_core_reloc_btfgen(void) +{ + run_core_reloc_tests(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c new file mode 100644 index 000000000..4a2c256c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include <test_progs.h> +#include "test_core_retro.skel.h" + +void test_core_retro(void) +{ + int err, zero = 0, res, my_pid = getpid(); + struct test_core_retro *skel; + + /* load program */ + skel = test_core_retro__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto out_close; + + err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero), + &my_pid, sizeof(my_pid), 0); + if (!ASSERT_OK(err, "map_update")) + goto out_close; + + /* attach probe */ + err = test_core_retro__attach(skel); + if (!ASSERT_OK(err, "attach_kprobe")) + goto out_close; + + /* trigger */ + usleep(1); + + err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0); + if (!ASSERT_OK(err, "map_lookup")) + goto out_close; + + ASSERT_EQ(res, my_pid, "pid_check"); + +out_close: + test_core_retro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cpu_mask.c b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c new file mode 100644 index 000000000..f7c7e2523 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cpu_mask.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/btf.h> +#include "bpf/libbpf_internal.h" + +static int duration = 0; + +static void validate_mask(int case_nr, const char *exp, bool *mask, int n) +{ + int i; + + for (i = 0; exp[i]; i++) { + if (exp[i] == '1') { + if (CHECK(i + 1 > n, "mask_short", + "case #%d: mask too short, got n=%d, need at least %d\n", + case_nr, n, i + 1)) + return; + CHECK(!mask[i], "cpu_not_set", + "case #%d: mask differs, expected cpu#%d SET\n", + case_nr, i); + } else { + CHECK(i < n && mask[i], "cpu_set", + "case #%d: mask differs, expected cpu#%d UNSET\n", + case_nr, i); + } + } + CHECK(i < n, "mask_long", + "case #%d: mask too long, got n=%d, expected at most %d\n", + 
case_nr, n, i); +} + +static struct { + const char *cpu_mask; + const char *expect; + bool fails; +} test_cases[] = { + { "0\n", "1", false }, + { "0,2\n", "101", false }, + { "0-2\n", "111", false }, + { "0-2,3-4\n", "11111", false }, + { "0", "1", false }, + { "0-2", "111", false }, + { "0,2", "101", false }, + { "0,1-3", "1111", false }, + { "0,1,2,3", "1111", false }, + { "0,2-3,5", "101101", false }, + { "3-3", "0001", false }, + { "2-4,6,9-10", "00111010011", false }, + /* failure cases */ + { "", "", true }, + { "0-", "", true }, + { "0 ", "", true }, + { "0_1", "", true }, + { "1-0", "", true }, + { "-1", "", true }, +}; + +void test_cpu_mask() +{ + int i, err, n; + bool *mask; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + mask = NULL; + err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n); + if (test_cases[i].fails) { + CHECK(!err, "should_fail", + "case #%d: parsing should fail!\n", i + 1); + } else { + if (CHECK(err, "parse_err", + "case #%d: cpu mask parsing failed: %d\n", + i + 1, err)) + continue; + validate_mask(i + 1, test_cases[i].expect, mask, n); + } + free(mask); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c new file mode 100644 index 000000000..b2dfc5954 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "test_custom_sec_handlers.skel.h" + +#define COOKIE_ABC1 1 +#define COOKIE_ABC2 2 +#define COOKIE_CUSTOM 3 +#define COOKIE_FALLBACK 4 +#define COOKIE_KPROBE 5 + +static int custom_setup_prog(struct bpf_program *prog, long cookie) +{ + if (cookie == COOKIE_ABC1) + bpf_program__set_autoload(prog, false); + + return 0; +} + +static int custom_prepare_load_prog(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie) +{ + if (cookie == COOKIE_FALLBACK) + opts->prog_flags |= BPF_F_SLEEPABLE; + else if (cookie == COOKIE_ABC1) + ASSERT_FALSE(true, "unexpected preload for abc"); + + return 0; +} + +static int custom_attach_prog(const struct bpf_program *prog, long cookie, + struct bpf_link **link) +{ + switch (cookie) { + case COOKIE_ABC2: + *link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + return libbpf_get_error(*link); + case COOKIE_CUSTOM: + *link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep"); + return libbpf_get_error(*link); + case COOKIE_KPROBE: + case COOKIE_FALLBACK: + /* no auto-attach for SEC("xyz") and SEC("kprobe") */ + *link = NULL; + return 0; + default: + ASSERT_FALSE(true, "unexpected cookie"); + return -EINVAL; + } +} + +static int abc1_id; +static int abc2_id; +static int custom_id; +static int fallback_id; +static int kprobe_id; + +__attribute__((constructor)) +static void register_sec_handlers(void) +{ + LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts, + .cookie = COOKIE_ABC1, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = NULL, + ); + LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts, + .cookie = COOKIE_ABC2, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = custom_attach_prog, + ); + LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts, + .cookie = COOKIE_CUSTOM, + .prog_setup_fn = NULL, + .prog_prepare_load_fn = NULL, + .prog_attach_fn = custom_attach_prog, + ); + + abc1_id = libbpf_register_prog_handler("abc", 
BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts); + abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts); + custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts); +} + +__attribute__((destructor)) +static void unregister_sec_handlers(void) +{ + libbpf_unregister_prog_handler(abc1_id); + libbpf_unregister_prog_handler(abc2_id); + libbpf_unregister_prog_handler(custom_id); +} + +void test_custom_sec_handlers(void) +{ + LIBBPF_OPTS(libbpf_prog_handler_opts, opts, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = custom_attach_prog, + ); + struct test_custom_sec_handlers* skel; + int err; + + ASSERT_GT(abc1_id, 0, "abc1_id"); + ASSERT_GT(abc2_id, 0, "abc2_id"); + ASSERT_GT(custom_id, 0, "custom_id"); + + /* override libbpf's handle of SEC("kprobe/...") but also allow pure + * SEC("kprobe") due to "kprobe+" specifier. Register it as + * TRACEPOINT, just for fun. + */ + opts.cookie = COOKIE_KPROBE; + kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts); + /* fallback treats everything as BPF_PROG_TYPE_SYSCALL program to test + * setting custom BPF_F_SLEEPABLE bit in preload handler + */ + opts.cookie = COOKIE_FALLBACK; + fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts); + + if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) { + if (fallback_id > 0) + libbpf_unregister_prog_handler(fallback_id); + if (kprobe_id > 0) + libbpf_unregister_prog_handler(kprobe_id); + return; + } + + /* open skeleton and validate assumptions */ + skel = test_custom_sec_handlers__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type"); + ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload"); + + ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type"); + ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type"); + ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type"); + ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type"); + ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type"); + + skel->rodata->my_pid = getpid(); + + /* now attempt to load everything */ + err = test_custom_sec_handlers__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + /* now try to auto-attach everything */ + err = test_custom_sec_handlers__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + skel->links.xyz = bpf_program__attach(skel->progs.kprobe1); + ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err"); + ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach"); + + /* trigger programs */ + usleep(1); + + /* SEC("abc") is set to not auto-loaded */ + ASSERT_FALSE(skel->bss->abc1_called, "abc1_called"); + ASSERT_TRUE(skel->bss->abc2_called, "abc2_called"); + ASSERT_TRUE(skel->bss->custom1_called, "custom1_called"); + ASSERT_TRUE(skel->bss->custom2_called, "custom2_called"); + /* SEC("kprobe") shouldn't be auto-attached */ + ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called"); + /* SEC("xyz") shouldn't be auto-attached */ + ASSERT_FALSE(skel->bss->xyz_called, "xyz_called"); + +cleanup: + test_custom_sec_handlers__destroy(skel); + + ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), 
"unregister_fallback"); + ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c new file mode 100644 index 000000000..911345c52 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/d_path.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <test_progs.h> +#include <sys/stat.h> +#include <linux/sched.h> +#include <sys/syscall.h> + +#define MAX_PATH_LEN 128 +#define MAX_FILES 7 + +#include "test_d_path.skel.h" +#include "test_d_path_check_rdonly_mem.skel.h" +#include "test_d_path_check_types.skel.h" + +static int duration; + +static struct { + __u32 cnt; + char paths[MAX_FILES][MAX_PATH_LEN]; +} src; + +static int set_pathname(int fd, pid_t pid) +{ + char buf[MAX_PATH_LEN]; + + snprintf(buf, MAX_PATH_LEN, "/proc/%d/fd/%d", pid, fd); + return readlink(buf, src.paths[src.cnt++], MAX_PATH_LEN); +} + +static int trigger_fstat_events(pid_t pid) +{ + int sockfd = -1, procfd = -1, devfd = -1; + int localfd = -1, indicatorfd = -1; + int pipefd[2] = { -1, -1 }; + struct stat fileStat; + int ret = -1; + + /* unmountable pseudo-filesystems */ + if (CHECK(pipe(pipefd) < 0, "trigger", "pipe failed\n")) + return ret; + /* unmountable pseudo-filesystems */ + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK(sockfd < 0, "trigger", "socket failed\n")) + goto out_close; + /* mountable pseudo-filesystems */ + procfd = open("/proc/self/comm", O_RDONLY); + if (CHECK(procfd < 0, "trigger", "open /proc/self/comm failed\n")) + goto out_close; + devfd = open("/dev/urandom", O_RDONLY); + if (CHECK(devfd < 0, "trigger", "open /dev/urandom failed\n")) + goto out_close; + localfd = open("/tmp/d_path_loadgen.txt", O_CREAT | O_RDONLY, 0644); + if (CHECK(localfd < 0, "trigger", "open /tmp/d_path_loadgen.txt failed\n")) + goto out_close; + /* bpf_d_path will return path with (deleted) */ + remove("/tmp/d_path_loadgen.txt"); + indicatorfd = open("/tmp/", O_PATH); + if (CHECK(indicatorfd < 0, "trigger", "open /tmp/ failed\n")) + goto out_close; + + ret = set_pathname(pipefd[0], pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[0]\n")) + goto out_close; + ret = set_pathname(pipefd[1], pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[1]\n")) + goto out_close; + ret = set_pathname(sockfd, pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for socket\n")) + goto out_close; + ret = set_pathname(procfd, pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for proc\n")) + goto out_close; + ret = set_pathname(devfd, pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for dev\n")) + goto out_close; + ret = set_pathname(localfd, pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for file\n")) + goto out_close; + ret = set_pathname(indicatorfd, pid); + if (CHECK(ret < 0, "trigger", "set_pathname failed for dir\n")) + goto out_close; + + /* triggers vfs_getattr */ + fstat(pipefd[0], &fileStat); + fstat(pipefd[1], &fileStat); + fstat(sockfd, &fileStat); + fstat(procfd, &fileStat); + fstat(devfd, &fileStat); + fstat(localfd, &fileStat); + fstat(indicatorfd, &fileStat); + +out_close: + /* triggers filp_close */ + close(pipefd[0]); + close(pipefd[1]); + close(sockfd); + close(procfd); + close(devfd); + close(localfd); + close(indicatorfd); + return ret; +} + +static void test_d_path_basic(void) +{ + struct test_d_path__bss *bss; + struct test_d_path *skel; + int err; + + skel = 
test_d_path__open_and_load(); + if (CHECK(!skel, "setup", "d_path skeleton failed\n")) + goto cleanup; + + err = test_d_path__attach(skel); + if (CHECK(err, "setup", "attach failed: %d\n", err)) + goto cleanup; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = trigger_fstat_events(bss->my_pid); + if (err < 0) + goto cleanup; + + if (CHECK(!bss->called_stat, + "stat", + "trampoline for security_inode_getattr was not called\n")) + goto cleanup; + + if (CHECK(!bss->called_close, + "close", + "trampoline for filp_close was not called\n")) + goto cleanup; + + for (int i = 0; i < MAX_FILES; i++) { + CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN), + "check", + "failed to get stat path[%d]: %s vs %s\n", + i, src.paths[i], bss->paths_stat[i]); + CHECK(strncmp(src.paths[i], bss->paths_close[i], MAX_PATH_LEN), + "check", + "failed to get close path[%d]: %s vs %s\n", + i, src.paths[i], bss->paths_close[i]); + /* The d_path helper returns size plus NUL char, hence + 1 */ + CHECK(bss->rets_stat[i] != strlen(bss->paths_stat[i]) + 1, + "check", + "failed to match stat return [%d]: %d vs %zd [%s]\n", + i, bss->rets_stat[i], strlen(bss->paths_stat[i]) + 1, + bss->paths_stat[i]); + CHECK(bss->rets_close[i] != strlen(bss->paths_stat[i]) + 1, + "check", + "failed to match stat return [%d]: %d vs %zd [%s]\n", + i, bss->rets_close[i], strlen(bss->paths_close[i]) + 1, + bss->paths_stat[i]); + } + +cleanup: + test_d_path__destroy(skel); +} + +static void test_d_path_check_rdonly_mem(void) +{ + struct test_d_path_check_rdonly_mem *skel; + + skel = test_d_path_check_rdonly_mem__open_and_load(); + ASSERT_ERR_PTR(skel, "unexpected_load_overwriting_rdonly_mem"); + + test_d_path_check_rdonly_mem__destroy(skel); +} + +static void test_d_path_check_types(void) +{ + struct test_d_path_check_types *skel; + + skel = test_d_path_check_types__open_and_load(); + ASSERT_ERR_PTR(skel, "unexpected_load_passing_wrong_type"); + + test_d_path_check_types__destroy(skel); +} + +void test_d_path(void) +{ + if (test__start_subtest("basic")) + test_d_path_basic(); + + if (test__start_subtest("check_rdonly_mem")) + test_d_path_check_rdonly_mem(); + + if (test__start_subtest("check_alloc_mem")) + test_d_path_check_types(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/deny_namespace.c b/tools/testing/selftests/bpf/prog_tests/deny_namespace.c new file mode 100644 index 000000000..1bc6241b7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/deny_namespace.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <test_progs.h> +#include "test_deny_namespace.skel.h" +#include <sched.h> +#include "cap_helpers.h" +#include <stdio.h> + +static int wait_for_pid(pid_t pid) +{ + int status, ret; + +again: + ret = waitpid(pid, &status, 0); + if (ret == -1) { + if (errno == EINTR) + goto again; + + return -1; + } + + if (!WIFEXITED(status)) + return -1; + + return WEXITSTATUS(status); +} + +/* negative return value -> some internal error + * positive return value -> userns creation failed + * 0 -> userns creation succeeded + */ +static int create_user_ns(void) +{ + pid_t pid; + + pid = fork(); + if (pid < 0) + return -1; + + if (pid == 0) { + if (unshare(CLONE_NEWUSER)) + _exit(EXIT_FAILURE); + _exit(EXIT_SUCCESS); + } + + return wait_for_pid(pid); +} + +static void test_userns_create_bpf(void) +{ + __u32 cap_mask = 1ULL << CAP_SYS_ADMIN; + __u64 old_caps = 0; + + cap_enable_effective(cap_mask, &old_caps); + + ASSERT_OK(create_user_ns(), "priv new user ns"); + + 
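/* with the LSM program attached, dropping CAP_SYS_ADMIN below should cause user namespace creation to be denied */ +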
cap_disable_effective(cap_mask, &old_caps); + + ASSERT_EQ(create_user_ns(), EPERM, "unpriv new user ns"); + + if (cap_mask & old_caps) + cap_enable_effective(cap_mask, NULL); +} + +static void test_unpriv_userns_create_no_bpf(void) +{ + __u32 cap_mask = 1ULL << CAP_SYS_ADMIN; + __u64 old_caps = 0; + + cap_disable_effective(cap_mask, &old_caps); + + ASSERT_OK(create_user_ns(), "no-bpf unpriv new user ns"); + + if (cap_mask & old_caps) + cap_enable_effective(cap_mask, NULL); +} + +void test_deny_namespace(void) +{ + struct test_deny_namespace *skel = NULL; + int err; + + if (test__start_subtest("unpriv_userns_create_no_bpf")) + test_unpriv_userns_create_no_bpf(); + + skel = test_deny_namespace__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel load")) + goto close_prog; + + err = test_deny_namespace__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto close_prog; + + if (test__start_subtest("userns_create_bpf")) + test_userns_create_bpf(); + + test_deny_namespace__detach(skel); + +close_prog: + test_deny_namespace__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c new file mode 100644 index 000000000..c11832657 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. Huawei Technologies Co., Ltd */ +#include <test_progs.h> +#include "dummy_st_ops.skel.h" +#include "trace_dummy_st_ops.skel.h" + +/* Need to keep consistent with definition in include/linux/bpf.h */ +struct bpf_dummy_ops_state { + int val; +}; + +static void test_dummy_st_ops_attach(void) +{ + struct dummy_st_ops *skel; + struct bpf_link *link; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.dummy_1); + ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ret_value(void) +{ + __u64 args[1] = {0}; + LIBBPF_OPTS(bpf_test_run_opts, attr, + .ctx_in = args, + .ctx_size_in = sizeof(args), + ); + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + err = bpf_prog_test_run_opts(fd, &attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); + + dummy_st_ops__destroy(skel); +} + +static void test_dummy_init_ptr_arg(void) +{ + int exp_retval = 0xbeef; + struct bpf_dummy_ops_state in_state = { + .val = exp_retval, + }; + __u64 args[1] = {(unsigned long)&in_state}; + LIBBPF_OPTS(bpf_test_run_opts, attr, + .ctx_in = args, + .ctx_size_in = sizeof(args), + ); + struct trace_dummy_st_ops *trace_skel; + struct dummy_st_ops *skel; + int fd, err; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_1); + + trace_skel = trace_dummy_st_ops__open(); + if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open")) + goto done; + + err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1, + fd, "test_1"); + if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)")) + goto done; + + err = trace_dummy_st_ops__load(trace_skel); + if (!ASSERT_OK(err, "load(trace_skel)")) + goto done; + + err = trace_dummy_st_ops__attach(trace_skel); + if (!ASSERT_OK(err, "attach(trace_skel)")) + goto done; + + err = bpf_prog_test_run_opts(fd, 
&attr); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); + ASSERT_EQ(attr.retval, exp_retval, "test_ret"); + ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val"); + +done: + dummy_st_ops__destroy(skel); + trace_dummy_st_ops__destroy(trace_skel); +} + +static void test_dummy_multiple_args(void) +{ + __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; + LIBBPF_OPTS(bpf_test_run_opts, attr, + .ctx_in = args, + .ctx_size_in = sizeof(args), + ); + struct dummy_st_ops *skel; + int fd, err; + size_t i; + char name[8]; + + skel = dummy_st_ops__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) + return; + + fd = bpf_program__fd(skel->progs.test_2); + err = bpf_prog_test_run_opts(fd, &attr); + ASSERT_OK(err, "test_run"); + for (i = 0; i < ARRAY_SIZE(args); i++) { + snprintf(name, sizeof(name), "arg %zu", i); + ASSERT_EQ(skel->bss->test_2_args[i], args[i], name); + } + + dummy_st_ops__destroy(skel); +} + +void test_dummy_st_ops(void) +{ + if (test__start_subtest("dummy_st_ops_attach")) + test_dummy_st_ops_attach(); + if (test__start_subtest("dummy_init_ret_value")) + test_dummy_init_ret_value(); + if (test__start_subtest("dummy_init_ptr_arg")) + test_dummy_init_ptr_arg(); + if (test__start_subtest("dummy_multiple_args")) + test_dummy_multiple_args(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c new file mode 100644 index 000000000..8fc4e6c02 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "dynptr_fail.skel.h" +#include "dynptr_success.skel.h" + +static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} dynptr_tests[] = { + /* failure cases */ + {"ringbuf_missing_release1", "Unreleased reference id=1"}, + {"ringbuf_missing_release2", "Unreleased reference id=2"}, + {"ringbuf_missing_release_callback", "Unreleased reference id"}, + {"use_after_invalid", "Expected an initialized dynptr as arg #3"}, + {"ringbuf_invalid_api", "type=mem expected=alloc_mem"}, + {"add_dynptr_to_map1", "invalid indirect read from stack"}, + {"add_dynptr_to_map2", "invalid indirect read from stack"}, + {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"}, + {"data_slice_out_of_bounds_map_value", "value is outside of the allowed memory range"}, + {"data_slice_use_after_release1", "invalid mem access 'scalar'"}, + {"data_slice_use_after_release2", "invalid mem access 'scalar'"}, + {"data_slice_missing_null_check1", "invalid mem access 'mem_or_null'"}, + {"data_slice_missing_null_check2", "invalid mem access 'mem_or_null'"}, + {"invalid_helper1", "invalid indirect read from stack"}, + {"invalid_helper2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write1", "Expected an initialized dynptr as arg #1"}, + {"invalid_write2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write3", "Expected an initialized dynptr as arg #1"}, + {"invalid_write4", "arg 1 is an unacquired reference"}, + {"invalid_read1", "invalid read from stack"}, + {"invalid_read2", "cannot pass in dynptr at an offset"}, + {"invalid_read3", "invalid read from stack"}, + {"invalid_read4", "invalid read from stack"}, + {"invalid_offset", "invalid write to stack"}, + {"global", "type=map_value expected=fp"}, + {"release_twice", "arg 1 is an 
unacquired reference"}, + {"release_twice_callback", "arg 1 is an unacquired reference"}, + {"dynptr_from_mem_invalid_api", + "Unsupported reg type fp for bpf_dynptr_from_mem data"}, + + /* success cases */ + {"test_read_write", NULL}, + {"test_data_slice", NULL}, + {"test_ringbuf", NULL}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct bpf_program *prog; + struct dynptr_fail *skel; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = dynptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + err = dynptr_fail__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + dynptr_fail__destroy(skel); +} + +static void verify_success(const char *prog_name) +{ + struct dynptr_success *skel; + struct bpf_program *prog; + struct bpf_link *link; + + skel = dynptr_success__open(); + if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) + return; + + skel->bss->pid = getpid(); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + dynptr_success__load(skel); + if (!ASSERT_OK_PTR(skel, "dynptr_success__load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->err, 0, "err"); + + bpf_link__destroy(link); + +cleanup: + dynptr_success__destroy(skel); +} + +void test_dynptr(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) { + if (!test__start_subtest(dynptr_tests[i].prog_name)) + continue; + + if (dynptr_tests[i].expected_err_msg) + verify_fail(dynptr_tests[i].prog_name, + dynptr_tests[i].expected_err_msg); + else + verify_success(dynptr_tests[i].prog_name); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c new file mode 100644 index 000000000..0613f3bb8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <net/if.h> +#include "empty_skb.skel.h" + +#define SYS(cmd) ({ \ + if (!ASSERT_OK(system(cmd), (cmd))) \ + goto out; \ +}) + +void serial_test_empty_skb(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, tattr); + struct empty_skb *bpf_obj = NULL; + struct nstoken *tok = NULL; + struct bpf_program *prog; + char eth_hlen_pp[15]; + char eth_hlen[14]; + int veth_ifindex; + int ipip_ifindex; + int err; + int i; + + struct { + const char *msg; + const void *data_in; + __u32 data_size_in; + int *ifindex; + int err; + int ret; + bool success_on_tc; + } tests[] = { + /* Empty packets are always rejected. 
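+ * BPF_PROG_RUN requires at least ETH_HLEN bytes of input data, so these cases expect -EINVAL.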
*/ + + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "veth empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &veth_ifindex, + .err = -EINVAL, + }, + { + /* BPF_PROG_RUN ETH_HLEN size check */ + .msg = "ipip empty ingress packet", + .data_in = NULL, + .data_size_in = 0, + .ifindex = &ipip_ifindex, + .err = -EINVAL, + }, + + /* ETH_HLEN-sized packets: + * - can not be redirected at LWT_XMIT + * - can be redirected at TC to non-tunneling dest + */ + + { + /* __bpf_redirect_common */ + .msg = "veth ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &veth_ifindex, + .ret = -ERANGE, + .success_on_tc = true, + }, + { + /* __bpf_redirect_no_mac + * + * lwt: skb->len=0 <= skb_network_offset=0 + * tc: skb->len=14 <= skb_network_offset=14 + */ + .msg = "ipip ETH_HLEN packet ingress", + .data_in = eth_hlen, + .data_size_in = sizeof(eth_hlen), + .ifindex = &ipip_ifindex, + .ret = -ERANGE, + }, + + /* ETH_HLEN+1-sized packet should be redirected. */ + + { + .msg = "veth ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &veth_ifindex, + }, + { + .msg = "ipip ETH_HLEN+1 packet ingress", + .data_in = eth_hlen_pp, + .data_size_in = sizeof(eth_hlen_pp), + .ifindex = &ipip_ifindex, + }, + }; + + SYS("ip netns add empty_skb"); + tok = open_netns("empty_skb"); + SYS("ip link add veth0 type veth peer veth1"); + SYS("ip link set dev veth0 up"); + SYS("ip link set dev veth1 up"); + SYS("ip addr add 10.0.0.1/8 dev veth0"); + SYS("ip addr add 10.0.0.2/8 dev veth1"); + veth_ifindex = if_nametoindex("veth0"); + + SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2"); + SYS("ip link set ipip0 up"); + SYS("ip addr add 192.168.1.1/16 dev ipip0"); + ipip_ifindex = if_nametoindex("ipip0"); + + bpf_obj = empty_skb__open_and_load(); + if (!ASSERT_OK_PTR(bpf_obj, "open skeleton")) + goto out; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + bpf_object__for_each_program(prog, bpf_obj->obj) { + char buf[128]; + bool at_tc = !strncmp(bpf_program__section_name(prog), "tc", 2); + + tattr.data_in = tests[i].data_in; + tattr.data_size_in = tests[i].data_size_in; + + tattr.data_size_out = 0; + bpf_obj->bss->ifindex = *tests[i].ifindex; + bpf_obj->bss->ret = 0; + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &tattr); + sprintf(buf, "err: %s [%s]", tests[i].msg, bpf_program__name(prog)); + + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(err, 0, buf); + else + ASSERT_EQ(err, tests[i].err, buf); + sprintf(buf, "ret: %s [%s]", tests[i].msg, bpf_program__name(prog)); + if (at_tc && tests[i].success_on_tc) + ASSERT_GE(bpf_obj->bss->ret, 0, buf); + else + ASSERT_EQ(bpf_obj->bss->ret, tests[i].ret, buf); + } + } + +out: + if (bpf_obj) + empty_skb__destroy(bpf_obj); + if (tok) + close_netns(tok); + system("ip netns del empty_skb"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/enable_stats.c b/tools/testing/selftests/bpf/prog_tests/enable_stats.c new file mode 100644 index 000000000..2cb208591 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/enable_stats.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_enable_stats.skel.h" + +void test_enable_stats(void) +{ + struct test_enable_stats *skel; + int stats_fd, err, prog_fd; + struct bpf_prog_info info; + __u32 info_len = sizeof(info); + int duration = 0; + + skel = test_enable_stats__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) + 
return; + + stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); + if (CHECK(stats_fd < 0, "get_stats_fd", "failed %d\n", errno)) { + test_enable_stats__destroy(skel); + return; + } + + err = test_enable_stats__attach(skel); + if (CHECK(err, "attach_raw_tp", "err %d\n", err)) + goto cleanup; + + test_enable_stats__detach(skel); + + prog_fd = bpf_program__fd(skel->progs.test_enable_stats); + memset(&info, 0, info_len); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (CHECK(err, "get_prog_info", + "failed to get bpf_prog_info for fd %d\n", prog_fd)) + goto cleanup; + if (CHECK(info.run_time_ns == 0, "check_stats_enabled", + "failed to enable run_time_ns stats\n")) + goto cleanup; + + CHECK(info.run_cnt != skel->bss->count, "check_run_cnt_valid", + "invalid run_cnt stats\n"); + +cleanup: + test_enable_stats__destroy(skel); + close(stats_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/endian.c b/tools/testing/selftests/bpf/prog_tests/endian.c new file mode 100644 index 000000000..1a11612ac --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/endian.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include "test_endian.skel.h" + +static int duration; + +#define IN16 0x1234 +#define IN32 0x12345678U +#define IN64 0x123456789abcdef0ULL + +#define OUT16 0x3412 +#define OUT32 0x78563412U +#define OUT64 0xf0debc9a78563412ULL + +void test_endian(void) +{ + struct test_endian* skel; + struct test_endian__bss *bss; + int err; + + skel = test_endian__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + + bss->in16 = IN16; + bss->in32 = IN32; + bss->in64 = IN64; + + err = test_endian__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out16, (__u64)OUT16); + CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out32, (__u64)OUT32); + CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out64, (__u64)OUT64); + + CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const16, (__u64)OUT16); + CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const32, (__u64)OUT32); + CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const64, (__u64)OUT64); +cleanup: + test_endian__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/exhandler.c b/tools/testing/selftests/bpf/prog_tests/exhandler.c new file mode 100644 index 000000000..118bb182e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/exhandler.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021, Oracle and/or its affiliates. */ + +#include <test_progs.h> + +/* Test that verifies exception handling is working. fork() + * triggers task_newtask tracepoint; that new task will have a + * NULL pointer task_works, and the associated task->task_works->func + * should not be NULL if task_works itself is non-NULL. + * + * So to verify exception handling we want to see a NULL task_works + * and task_works->func; if we see this we can conclude that the + * exception handler ran when we attempted to dereference task->task_works + * and zeroed the destination register. 
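+ * A non-zero exception_triggered count below confirms that the handler ran.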
+ */ +#include "exhandler_kern.skel.h" + +void test_exhandler(void) +{ + int err = 0, duration = 0, status; + struct exhandler_kern *skel; + pid_t cpid; + + skel = exhandler_kern__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err)) + goto cleanup; + + skel->bss->test_pid = getpid(); + + err = exhandler_kern__attach(skel); + if (!ASSERT_OK(err, "attach")) + goto cleanup; + cpid = fork(); + if (!ASSERT_GT(cpid, -1, "fork failed")) + goto cleanup; + if (cpid == 0) + _exit(0); + waitpid(cpid, &status, 0); + + ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred"); +cleanup: + exhandler_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c new file mode 100644 index 000000000..130f5b82d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include "fentry_test.lskel.h" +#include "fexit_test.lskel.h" + +void test_fentry_fexit(void) +{ + struct fentry_test_lskel *fentry_skel = NULL; + struct fexit_test_lskel *fexit_skel = NULL; + __u64 *fentry_res, *fexit_res; + int err, prog_fd, i; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + fentry_skel = fentry_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) + goto close_prog; + fexit_skel = fexit_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) + goto close_prog; + + err = fentry_test_lskel__attach(fentry_skel); + if (!ASSERT_OK(err, "fentry_attach")) + goto close_prog; + err = fexit_test_lskel__attach(fexit_skel); + if (!ASSERT_OK(err, "fexit_attach")) + goto close_prog; + + prog_fd = fexit_skel->progs.test1.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_OK(topts.retval, "ipv6 test retval"); + + fentry_res = (__u64 *)fentry_skel->bss; + fexit_res = (__u64 *)fexit_skel->bss; + printf("%lld\n", fentry_skel->bss->test1_result); + for (i = 0; i < 8; i++) { + ASSERT_EQ(fentry_res[i], 1, "fentry result"); + ASSERT_EQ(fexit_res[i], 1, "fexit result"); + } + +close_prog: + fentry_test_lskel__destroy(fentry_skel); + fexit_test_lskel__destroy(fexit_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c new file mode 100644 index 000000000..c0d1d61d5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include "fentry_test.lskel.h" + +static int fentry_test(struct fentry_test_lskel *fentry_skel) +{ + int err, prog_fd, i; + int link_fd; + __u64 *result; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + err = fentry_test_lskel__attach(fentry_skel); + if (!ASSERT_OK(err, "fentry_attach")) + return err; + + /* Check that already linked program can't be attached again. 
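+ * The second attach must fail with a negative error rather than return a new link fd.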
*/ + link_fd = fentry_test_lskel__test1__attach(fentry_skel); + if (!ASSERT_LT(link_fd, 0, "fentry_attach_link")) + return -1; + + prog_fd = fentry_skel->progs.test1.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + result = (__u64 *)fentry_skel->bss; + for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) { + if (!ASSERT_EQ(result[i], 1, "fentry_result")) + return -1; + } + + fentry_test_lskel__detach(fentry_skel); + + /* zero results for re-attach test */ + memset(fentry_skel->bss, 0, sizeof(*fentry_skel->bss)); + return 0; +} + +void test_fentry_test(void) +{ + struct fentry_test_lskel *fentry_skel = NULL; + int err; + + fentry_skel = fentry_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) + goto cleanup; + + err = fentry_test(fentry_skel); + if (!ASSERT_OK(err, "fentry_first_attach")) + goto cleanup; + + err = fentry_test(fentry_skel); + ASSERT_OK(err, "fentry_second_attach"); + +cleanup: + fentry_test_lskel__destroy(fentry_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c new file mode 100644 index 000000000..d1e32e792 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include <network_helpers.h> +#include <bpf/btf.h> +#include "bind4_prog.skel.h" + +typedef int (*test_cb)(struct bpf_object *obj); + +static int check_data_map(struct bpf_object *obj, int prog_cnt, bool reset) +{ + struct bpf_map *data_map = NULL, *map; + __u64 *result = NULL; + const int zero = 0; + __u32 duration = 0; + int ret = -1, i; + + result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64)); + if (CHECK(!result, "alloc_memory", "failed to alloc memory")) + return -ENOMEM; + + bpf_object__for_each_map(map, obj) + if (bpf_map__is_internal(map)) { + data_map = map; + break; + } + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto out; + + ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, result); + if (CHECK(ret, "get_result", + "failed to get output data: %d\n", ret)) + goto out; + + for (i = 0; i < prog_cnt; i++) { + if (CHECK(result[i] != 1, "result", + "fexit_bpf2bpf result[%d] failed err %llu\n", + i, result[i])) + goto out; + result[i] = 0; + } + if (reset) { + ret = bpf_map_update_elem(bpf_map__fd(data_map), &zero, result, 0); + if (CHECK(ret, "reset_result", "failed to reset result\n")) + goto out; + } + + ret = 0; +out: + free(result); + return ret; +} + +static void test_fexit_bpf2bpf_common(const char *obj_file, + const char *target_obj_file, + int prog_cnt, + const char **prog_name, + bool run_prog, + test_cb cb) +{ + struct bpf_object *obj = NULL, *tgt_obj; + __u32 tgt_prog_id, info_len; + struct bpf_prog_info prog_info = {}; + struct bpf_program **prog = NULL, *p; + struct bpf_link **link = NULL; + int err, tgt_fd, i; + struct btf *btf; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .repeat = 1, + ); + + err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, + &tgt_obj, &tgt_fd); + if (!ASSERT_OK(err, "tgt_prog_load")) + return; + + info_len = sizeof(prog_info); + err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len); + if (!ASSERT_OK(err, "tgt_fd_get_info")) + goto close_prog; + + tgt_prog_id = prog_info.id; + btf = bpf_object__btf(tgt_obj); + + link = 
calloc(sizeof(struct bpf_link *), prog_cnt); + if (!ASSERT_OK_PTR(link, "link_ptr")) + goto close_prog; + + prog = calloc(sizeof(struct bpf_program *), prog_cnt); + if (!ASSERT_OK_PTR(prog, "prog_ptr")) + goto close_prog; + + obj = bpf_object__open_file(obj_file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) + goto close_prog; + + bpf_object__for_each_program(p, obj) { + err = bpf_program__set_attach_target(p, tgt_fd, NULL); + ASSERT_OK(err, "set_attach_target"); + } + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto close_prog; + + for (i = 0; i < prog_cnt; i++) { + struct bpf_link_info link_info; + struct bpf_program *pos; + const char *pos_sec_name; + char *tgt_name; + __s32 btf_id; + + tgt_name = strstr(prog_name[i], "/"); + if (!ASSERT_OK_PTR(tgt_name, "tgt_name")) + goto close_prog; + btf_id = btf__find_by_name_kind(btf, tgt_name + 1, BTF_KIND_FUNC); + + prog[i] = NULL; + bpf_object__for_each_program(pos, obj) { + pos_sec_name = bpf_program__section_name(pos); + if (pos_sec_name && !strcmp(pos_sec_name, prog_name[i])) { + prog[i] = pos; + break; + } + } + if (!ASSERT_OK_PTR(prog[i], prog_name[i])) + goto close_prog; + + link[i] = bpf_program__attach_trace(prog[i]); + if (!ASSERT_OK_PTR(link[i], "attach_trace")) + goto close_prog; + + info_len = sizeof(link_info); + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link[i]), + &link_info, &info_len); + ASSERT_OK(err, "link_fd_get_info"); + ASSERT_EQ(link_info.tracing.attach_type, + bpf_program__expected_attach_type(prog[i]), + "link_attach_type"); + ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id"); + ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id"); + } + + if (cb) { + err = cb(obj); + if (err) + goto close_prog; + } + + if (!run_prog) + goto close_prog; + + err = bpf_prog_test_run_opts(tgt_fd, &topts); + ASSERT_OK(err, "prog_run"); + ASSERT_EQ(topts.retval, 0, "prog_run_ret"); + + if (check_data_map(obj, prog_cnt, false)) + goto close_prog; + +close_prog: + for (i = 0; i < prog_cnt; i++) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(tgt_obj); + free(link); + free(prog); +} + +static void test_target_no_callees(void) +{ + const char *prog_name[] = { + "fexit/test_pkt_md_access", + }; + test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.bpf.o", + "./test_pkt_md_access.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, true, NULL); +} + +static void test_target_yes_callees(void) +{ + const char *prog_name[] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + "fexit/test_pkt_access_subprog3", + }; + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o", + "./test_pkt_access.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, true, NULL); +} + +static void test_func_replace(void) +{ + const char *prog_name[] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + "fexit/test_pkt_access_subprog3", + "freplace/get_skb_len", + "freplace/get_skb_ifindex", + "freplace/get_constant", + "freplace/test_pkt_write_access_subprog", + }; + test_fexit_bpf2bpf_common("./fexit_bpf2bpf.bpf.o", + "./test_pkt_access.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, true, NULL); +} + +static void test_func_replace_verify(void) +{ + const char *prog_name[] = { + "freplace/do_bind", + }; + test_fexit_bpf2bpf_common("./freplace_connect4.bpf.o", + "./connect4_prog.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, false, NULL); +} + +static int 
test_second_attach(struct bpf_object *obj) +{ + const char *prog_name = "security_new_get_constant"; + const char *tgt_name = "get_constant"; + const char *tgt_obj_file = "./test_pkt_access.bpf.o"; + struct bpf_program *prog = NULL; + struct bpf_object *tgt_obj; + struct bpf_link *link; + int err = 0, tgt_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .repeat = 1, + ); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (!ASSERT_OK_PTR(prog, "find_prog")) + return -ENOENT; + + err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC, + &tgt_obj, &tgt_fd); + if (!ASSERT_OK(err, "second_prog_load")) + return err; + + link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name); + if (!ASSERT_OK_PTR(link, "second_link")) + goto out; + + err = bpf_prog_test_run_opts(tgt_fd, &topts); + if (!ASSERT_OK(err, "ipv6 test_run")) + goto out; + if (!ASSERT_OK(topts.retval, "ipv6 retval")) + goto out; + + err = check_data_map(obj, 1, true); + if (err) + goto out; + +out: + bpf_link__destroy(link); + bpf_object__close(tgt_obj); + return err; +} + +static void test_func_replace_multi(void) +{ + const char *prog_name[] = { + "freplace/get_constant", + }; + test_fexit_bpf2bpf_common("./freplace_get_constant.bpf.o", + "./test_pkt_access.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, true, test_second_attach); +} + +static void test_fmod_ret_freplace(void) +{ + struct bpf_object *freplace_obj = NULL, *pkt_obj, *fmod_obj = NULL; + const char *freplace_name = "./freplace_get_constant.bpf.o"; + const char *fmod_ret_name = "./fmod_ret_freplace.bpf.o"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + const char *tgt_name = "./test_pkt_access.bpf.o"; + struct bpf_link *freplace_link = NULL; + struct bpf_program *prog; + __u32 duration = 0; + int err, pkt_fd, attach_prog_fd; + + err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC, + &pkt_obj, &pkt_fd); + /* the target prog should load fine */ + if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", + tgt_name, err, errno)) + return; + + freplace_obj = bpf_object__open_file(freplace_name, NULL); + if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open")) + goto out; + + prog = bpf_object__next_program(freplace_obj, NULL); + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); + ASSERT_OK(err, "freplace__set_attach_target"); + + err = bpf_object__load(freplace_obj); + if (CHECK(err, "freplace_obj_load", "err %d\n", err)) + goto out; + + freplace_link = bpf_program__attach_trace(prog); + if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace")) + goto out; + + fmod_obj = bpf_object__open_file(fmod_ret_name, NULL); + if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open")) + goto out; + + attach_prog_fd = bpf_program__fd(prog); + prog = bpf_object__next_program(fmod_obj, NULL); + err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL); + ASSERT_OK(err, "fmod_ret_set_attach_target"); + + err = bpf_object__load(fmod_obj); + if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n")) + goto out; + +out: + bpf_link__destroy(freplace_link); + bpf_object__close(freplace_obj); + bpf_object__close(fmod_obj); + bpf_object__close(pkt_obj); +} + + +static void test_func_sockmap_update(void) +{ + const char *prog_name[] = { + "freplace/cls_redirect", + }; + test_fexit_bpf2bpf_common("./freplace_cls_redirect.bpf.o", + "./test_cls_redirect.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, false, NULL); +} + +static void test_obj_load_failure_common(const char *obj_file, + const char 
*target_obj_file) +{ + /* + * standalone test that asserts failure to load freplace prog + * because of invalid return code. + */ + struct bpf_object *obj = NULL, *pkt_obj; + struct bpf_program *prog; + int err, pkt_fd; + __u32 duration = 0; + + err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, + &pkt_obj, &pkt_fd); + /* the target prog should load fine */ + if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", + target_obj_file, err, errno)) + return; + + obj = bpf_object__open_file(obj_file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) + goto close_prog; + + prog = bpf_object__next_program(obj, NULL); + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); + ASSERT_OK(err, "set_attach_target"); + + /* It should fail to load the program */ + err = bpf_object__load(obj); + if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err)) + goto close_prog; + +close_prog: + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} + +static void test_func_replace_return_code(void) +{ + /* test invalid return code in the replaced program */ + test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o", + "./connect4_prog.bpf.o"); +} + +static void test_func_map_prog_compatibility(void) +{ + /* test with spin lock map value in the replaced program */ + test_obj_load_failure_common("./freplace_attach_probe.bpf.o", + "./test_attach_probe.bpf.o"); +} + +static void test_func_replace_global_func(void) +{ + const char *prog_name[] = { + "freplace/test_pkt_access", + }; + + test_fexit_bpf2bpf_common("./freplace_global_func.bpf.o", + "./test_pkt_access.bpf.o", + ARRAY_SIZE(prog_name), + prog_name, false, NULL); +} + +static int find_prog_btf_id(const char *name, __u32 attach_prog_fd) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + struct btf *btf; + int ret; + + ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len); + if (ret) + return ret; + + if (!info.btf_id) + return -EINVAL; + + btf = btf__load_from_kernel_by_id(info.btf_id); + ret = libbpf_get_error(btf); + if (ret) + return ret; + + ret = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + btf__free(btf); + return ret; +} + +static int load_fentry(int attach_prog_fd, int attach_btf_id) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .expected_attach_type = BPF_TRACE_FENTRY, + .attach_prog_fd = attach_prog_fd, + .attach_btf_id = attach_btf_id, + ); + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + return bpf_prog_load(BPF_PROG_TYPE_TRACING, + "bind4_fentry", + "GPL", + insns, + ARRAY_SIZE(insns), + &opts); +} + +static void test_fentry_to_cgroup_bpf(void) +{ + struct bind4_prog *skel = NULL; + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int cgroup_fd = -1; + int fentry_fd = -1; + int btf_id; + + cgroup_fd = test__join_cgroup("/fentry_to_cgroup_bpf"); + if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) + return; + + skel = bind4_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto cleanup; + + skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bpf_program__attach_cgroup")) + goto cleanup; + + btf_id = find_prog_btf_id("bind_v4_prog", bpf_program__fd(skel->progs.bind_v4_prog)); + if (!ASSERT_GE(btf_id, 0, "find_prog_btf_id")) + goto cleanup; + + fentry_fd = load_fentry(bpf_program__fd(skel->progs.bind_v4_prog), btf_id); + if (!ASSERT_GE(fentry_fd, 0, "load_fentry")) + goto cleanup; + + /* Make sure bpf_obj_get_info_by_fd works correctly 
when attaching + * to another BPF program. + */ + + ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len), + "bpf_obj_get_info_by_fd"); + + ASSERT_EQ(info.btf_id, 0, "info.btf_id"); + ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id"); + ASSERT_GT(info.attach_btf_obj_id, 0, "info.attach_btf_obj_id"); + +cleanup: + if (cgroup_fd >= 0) + close(cgroup_fd); + if (fentry_fd >= 0) + close(fentry_fd); + bind4_prog__destroy(skel); +} + +/* NOTE: affect other tests, must run in serial mode */ +void serial_test_fexit_bpf2bpf(void) +{ + if (test__start_subtest("target_no_callees")) + test_target_no_callees(); + if (test__start_subtest("target_yes_callees")) + test_target_yes_callees(); + if (test__start_subtest("func_replace")) + test_func_replace(); + if (test__start_subtest("func_replace_verify")) + test_func_replace_verify(); + if (test__start_subtest("func_sockmap_update")) + test_func_sockmap_update(); + if (test__start_subtest("func_replace_return_code")) + test_func_replace_return_code(); + if (test__start_subtest("func_map_prog_compatibility")) + test_func_map_prog_compatibility(); + if (test__start_subtest("func_replace_multi")) + test_func_replace_multi(); + if (test__start_subtest("fmod_ret_freplace")) + test_fmod_ret_freplace(); + if (test__start_subtest("func_replace_global_func")) + test_func_replace_global_func(); + if (test__start_subtest("fentry_to_cgroup_bpf")) + test_fentry_to_cgroup_bpf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c new file mode 100644 index 000000000..f949647db --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <sched.h> +#include <test_progs.h> +#include <time.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include "fexit_sleep.lskel.h" + +static int do_sleep(void *skel) +{ + struct fexit_sleep_lskel *fexit_skel = skel; + struct timespec ts1 = { .tv_nsec = 1 }; + struct timespec ts2 = { .tv_sec = 10 }; + + fexit_skel->bss->pid = getpid(); + (void)syscall(__NR_nanosleep, &ts1, NULL); + (void)syscall(__NR_nanosleep, &ts2, NULL); + return 0; +} + +#define STACK_SIZE (1024 * 1024) +static char child_stack[STACK_SIZE]; + +void test_fexit_sleep(void) +{ + struct fexit_sleep_lskel *fexit_skel = NULL; + int wstatus, duration = 0; + pid_t cpid; + int err, fexit_cnt; + + fexit_skel = fexit_sleep_lskel__open_and_load(); + if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) + goto cleanup; + + err = fexit_sleep_lskel__attach(fexit_skel); + if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) + goto cleanup; + + cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel); + if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno))) + goto cleanup; + + /* wait until first sys_nanosleep ends and second sys_nanosleep starts */ + while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2); + fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt); + if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt)) + goto cleanup; + + /* close progs and detach them. That will trigger two nop5->jmp5 rewrites + * in the trampolines to skip nanosleep_fexit prog. + * The nanosleep_fentry prog will get detached first. + * The nanosleep_fexit prog will get detached second. + * Detaching will trigger freeing of both progs JITed images. 
+ * There will be two dying bpf_tramp_image-s, but only the initial + * bpf_tramp_image (with both _fentry and _fexit progs will be stuck + * waiting for percpu_ref_kill to confirm). The other one + * will be freed quickly. + */ + close(fexit_skel->progs.nanosleep_fentry.prog_fd); + close(fexit_skel->progs.nanosleep_fexit.prog_fd); + fexit_sleep_lskel__detach(fexit_skel); + + /* kill the thread to unwind sys_nanosleep stack through the trampoline */ + kill(cpid, 9); + + if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno))) + goto cleanup; + if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed")) + goto cleanup; + + /* The bypassed nanosleep_fexit prog shouldn't have executed. + * Unlike progs the maps were not freed and directly accessible. + */ + fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt); + if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt)) + goto cleanup; + +cleanup: + fexit_sleep_lskel__destroy(fexit_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c new file mode 100644 index 000000000..5a7e6011f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +/* that's kernel internal BPF_MAX_TRAMP_PROGS define */ +#define CNT 38 + +void serial_test_fexit_stress(void) +{ + int fexit_fd[CNT] = {}; + int link_fd[CNT] = {}; + int err, i; + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + LIBBPF_OPTS(bpf_prog_load_opts, trace_opts, + .expected_attach_type = BPF_TRACE_FEXIT, + ); + + LIBBPF_OPTS(bpf_test_run_opts, topts); + + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", + trace_opts.expected_attach_type); + if (!ASSERT_GT(err, 0, "find_vmlinux_btf_id")) + goto out; + trace_opts.attach_btf_id = err; + + for (i = 0; i < CNT; i++) { + fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL", + trace_program, + sizeof(trace_program) / sizeof(struct bpf_insn), + &trace_opts); + if (!ASSERT_GE(fexit_fd[i], 0, "fexit load")) + goto out; + link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL); + if (!ASSERT_GE(link_fd[i], 0, "fexit attach")) + goto out; + } + + err = bpf_prog_test_run_opts(fexit_fd[0], &topts); + ASSERT_OK(err, "bpf_prog_test_run_opts"); + +out: + for (i = 0; i < CNT; i++) { + if (link_fd[i]) + close(link_fd[i]); + if (fexit_fd[i]) + close(fexit_fd[i]); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c new file mode 100644 index 000000000..101b73430 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include "fexit_test.lskel.h" + +static int fexit_test(struct fexit_test_lskel *fexit_skel) +{ + int err, prog_fd, i; + int link_fd; + __u64 *result; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + err = fexit_test_lskel__attach(fexit_skel); + if (!ASSERT_OK(err, "fexit_attach")) + return err; + + /* Check that already linked program can't be attached again. 
*/ + link_fd = fexit_test_lskel__test1__attach(fexit_skel); + if (!ASSERT_LT(link_fd, 0, "fexit_attach_link")) + return -1; + + prog_fd = fexit_skel->progs.test1.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + result = (__u64 *)fexit_skel->bss; + for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { + if (!ASSERT_EQ(result[i], 1, "fexit_result")) + return -1; + } + + fexit_test_lskel__detach(fexit_skel); + + /* zero results for re-attach test */ + memset(fexit_skel->bss, 0, sizeof(*fexit_skel->bss)); + return 0; +} + +void test_fexit_test(void) +{ + struct fexit_test_lskel *fexit_skel = NULL; + int err; + + fexit_skel = fexit_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) + goto cleanup; + + err = fexit_test(fexit_skel); + if (!ASSERT_OK(err, "fexit_first_attach")) + goto cleanup; + + err = fexit_test(fexit_skel); + ASSERT_OK(err, "fexit_second_attach"); + +cleanup: + fexit_test_lskel__destroy(fexit_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c new file mode 100644 index 000000000..5165b38f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <sys/types.h> +#include <unistd.h> +#include "find_vma.skel.h" +#include "find_vma_fail1.skel.h" +#include "find_vma_fail2.skel.h" + +static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) +{ + if (need_test) { + ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); + ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); + ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); + ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + } + + skel->bss->found_vm_exec = 0; + skel->data->find_addr_ret = -1; + skel->data->find_zero_ret = -1; + skel->bss->d_iname[0] = 0; +} + +static int open_pe(void) +{ + struct perf_event_attr attr = {0}; + int pfd; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_HARDWARE; + attr.config = PERF_COUNT_HW_CPU_CYCLES; + attr.freq = 1; + attr.sample_freq = 1000; + pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); + + return pfd >= 0 ? 
pfd : -errno; +} + +static bool find_vma_pe_condition(struct find_vma *skel) +{ + return skel->bss->found_vm_exec == 0 || + skel->data->find_addr_ret != 0 || + skel->data->find_zero_ret == -1 || + strcmp(skel->bss->d_iname, "test_progs") != 0; +} + +static void test_find_vma_pe(struct find_vma *skel) +{ + struct bpf_link *link = NULL; + volatile int j = 0; + int pfd, i; + const int one_bn = 1000000000; + + pfd = open_pe(); + if (pfd < 0) { + if (pfd == -ENOENT || pfd == -EOPNOTSUPP) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); + goto cleanup; + } + if (!ASSERT_GE(pfd, 0, "perf_event_open")) + goto cleanup; + } + + link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd); + if (!ASSERT_OK_PTR(link, "attach_perf_event")) + goto cleanup; + + for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i) + ++j; + + test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn); +cleanup: + bpf_link__destroy(link); + close(pfd); +} + +static void test_find_vma_kprobe(struct find_vma *skel) +{ + int err; + + err = find_vma__attach(skel); + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) + return; + + getpgid(skel->bss->target_pid); + test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true); +} + +static void test_illegal_write_vma(void) +{ + struct find_vma_fail1 *skel; + + skel = find_vma_fail1__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load")) + find_vma_fail1__destroy(skel); +} + +static void test_illegal_write_task(void) +{ + struct find_vma_fail2 *skel; + + skel = find_vma_fail2__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load")) + find_vma_fail2__destroy(skel); +} + +void serial_test_find_vma(void) +{ + struct find_vma *skel; + + skel = find_vma__open_and_load(); + if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load")) + return; + + skel->bss->target_pid = getpid(); + skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe; + + test_find_vma_pe(skel); + test_find_vma_kprobe(skel); + + find_vma__destroy(skel); + test_illegal_write_vma(); + test_illegal_write_task(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c new file mode 100644 index 000000000..7acca37a3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <error.h> +#include <linux/if.h> +#include <linux/if_tun.h> +#include <sys/uio.h> + +#include "bpf_flow.skel.h" + +#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */ + +#ifndef IP_MF +#define IP_MF 0x2000 +#endif + +#define CHECK_FLOW_KEYS(desc, got, expected) \ + _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ + desc, \ + topts.duration, \ + "nhoff=%u/%u " \ + "thoff=%u/%u " \ + "addr_proto=0x%x/0x%x " \ + "is_frag=%u/%u " \ + "is_first_frag=%u/%u " \ + "is_encap=%u/%u " \ + "ip_proto=0x%x/0x%x " \ + "n_proto=0x%x/0x%x " \ + "flow_label=0x%x/0x%x " \ + "sport=%u/%u " \ + "dport=%u/%u\n", \ + got.nhoff, expected.nhoff, \ + got.thoff, expected.thoff, \ + got.addr_proto, expected.addr_proto, \ + got.is_frag, expected.is_frag, \ + got.is_first_frag, expected.is_first_frag, \ + got.is_encap, expected.is_encap, \ + got.ip_proto, expected.ip_proto, \ + got.n_proto, expected.n_proto, \ + got.flow_label, expected.flow_label, \ + got.sport, expected.sport, \ + got.dport, expected.dport) + +struct ipv4_pkt { + struct ethhdr eth; + 
struct iphdr iph; + struct tcphdr tcp; +} __packed; + +struct ipip_pkt { + struct ethhdr eth; + struct iphdr iph; + struct iphdr iph_inner; + struct tcphdr tcp; +} __packed; + +struct svlan_ipv4_pkt { + struct ethhdr eth; + __u16 vlan_tci; + __u16 vlan_proto; + struct iphdr iph; + struct tcphdr tcp; +} __packed; + +struct ipv6_pkt { + struct ethhdr eth; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed; + +struct ipv6_frag_pkt { + struct ethhdr eth; + struct ipv6hdr iph; + struct frag_hdr { + __u8 nexthdr; + __u8 reserved; + __be16 frag_off; + __be32 identification; + } ipf; + struct tcphdr tcp; +} __packed; + +struct dvlan_ipv6_pkt { + struct ethhdr eth; + __u16 vlan_tci; + __u16 vlan_proto; + __u16 vlan_tci2; + __u16 vlan_proto2; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed; + +struct test { + const char *name; + union { + struct ipv4_pkt ipv4; + struct svlan_ipv4_pkt svlan_ipv4; + struct ipip_pkt ipip; + struct ipv6_pkt ipv6; + struct ipv6_frag_pkt ipv6_frag; + struct dvlan_ipv6_pkt dvlan_ipv6; + } pkt; + struct bpf_flow_keys keys; + __u32 flags; + __u32 retval; +}; + +#define VLAN_HLEN 4 + +static __u32 duration; +struct test tests[] = { + { + .name = "ipv4", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "ipv6", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "802.1q-ipv4", + .pkt.svlan_ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q), + .vlan_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN + VLAN_HLEN, + .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "802.1ad-ipv6", + .pkt.dvlan_ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD), + .vlan_proto = __bpf_constant_htons(ETH_P_8021Q), + .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN + VLAN_HLEN * 2, + .thoff = ETH_HLEN + VLAN_HLEN * 2 + + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "ipv4-frag", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.frag_off = 
__bpf_constant_htons(IP_MF), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_frag = true, + .is_first_frag = true, + .sport = 80, + .dport = 8080, + }, + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .retval = BPF_OK, + }, + { + .name = "ipv4-no-frag", + .pkt.ipv4 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_TCP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.frag_off = __bpf_constant_htons(IP_MF), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_frag = true, + .is_first_frag = true, + }, + .retval = BPF_OK, + }, + { + .name = "ipv6-frag", + .pkt.ipv6_frag = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_FRAGMENT, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .ipf.nexthdr = IPPROTO_TCP, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr) + + sizeof(struct frag_hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .is_frag = true, + .is_first_frag = true, + .sport = 80, + .dport = 8080, + }, + .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG, + .retval = BPF_OK, + }, + { + .name = "ipv6-no-frag", + .pkt.ipv6_frag = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_FRAGMENT, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .ipf.nexthdr = IPPROTO_TCP, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr) + + sizeof(struct frag_hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .is_frag = true, + .is_first_frag = true, + }, + .retval = BPF_OK, + }, + { + .name = "ipv6-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + .retval = BPF_OK, + }, + { + .name = "ipv6-no-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .retval = BPF_OK, + }, + { + .name = "ipip-encap", + .pkt.ipip = { 
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) + + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "ipip-no-encap", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_IPIP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .retval = BPF_OK, + }, + { + .name = "ipip-encap-dissector-continue", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_IPIP, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES) - + sizeof(struct iphdr), + .tcp.doff = 5, + .tcp.source = 99, + .tcp.dest = 9090, + }, + .retval = BPF_FLOW_DISSECTOR_CONTINUE, + }, +}; + +static int create_tap(const char *ifname) +{ + struct ifreq ifr = { + .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS, + }; + int fd, ret; + + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + + fd = open("/dev/net/tun", O_RDWR); + if (fd < 0) + return -1; + + ret = ioctl(fd, TUNSETIFF, &ifr); + if (ret) + return -1; + + return fd; +} + +static int tx_tap(int fd, void *pkt, size_t len) +{ + struct iovec iov[] = { + { + .iov_len = len, + .iov_base = pkt, + }, + }; + return writev(fd, iov, ARRAY_SIZE(iov)); +} + +static int ifup(const char *ifname) +{ + struct ifreq ifr = {}; + int sk, ret; + + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + + sk = socket(PF_INET, SOCK_DGRAM, 0); + if (sk < 0) + return -1; + + ret = ioctl(sk, SIOCGIFFLAGS, &ifr); + if (ret) { + close(sk); + return -1; + } + + ifr.ifr_flags |= IFF_UP; + ret = ioctl(sk, SIOCSIFFLAGS, &ifr); + if (ret) { + close(sk); + return -1; + } + + close(sk); + return 0; +} + +static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) +{ + int i, err, map_fd, prog_fd; + struct bpf_program *prog; + char prog_name[32]; + + map_fd = bpf_map__fd(prog_array); + if (map_fd < 0) + return -1; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (!prog) + return -1; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) + return -1; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (err) + return -1; + } + return 0; +} + +static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) +{ + int i, 
err, keys_fd; + + keys_fd = bpf_map__fd(keys); + if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) + return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + /* Keep in sync with 'flags' from eth_get_headlen. */ + __u32 eth_get_headlen_flags = + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct bpf_flow_keys flow_keys = {}; + __u32 key = (__u32)(tests[i].keys.sport) << 16 | + tests[i].keys.dport; + + /* For skb-less case we can't pass input flags; run + * only the tests that have a matching set of flags. + */ + + if (tests[i].flags != eth_get_headlen_flags) + continue; + + err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); + CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); + + /* check the stored flow_keys only if BPF_OK expected */ + if (tests[i].retval != BPF_OK) + continue; + + err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); + ASSERT_OK(err, "bpf_map_lookup_elem"); + + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + + err = bpf_map_delete_elem(keys_fd, &key); + ASSERT_OK(err, "bpf_map_delete_elem"); + } +} + +static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) + return; + + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno)) + return; + + run_tests_skb_less(tap_fd, skel->maps.last_dissection); + + err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno); +} + +static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd) +{ + struct bpf_link *link; + int err, net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno)) + return; + + link = bpf_program__attach_netns(skel->progs._dissect, net_fd); + if (!ASSERT_OK_PTR(link, "attach_netns")) + goto out_close; + + run_tests_skb_less(tap_fd, skel->maps.last_dissection); + + err = bpf_link__destroy(link); + CHECK(err, "bpf_link__destroy", "err %d\n", err); +out_close: + close(net_fd); +} + +void test_flow_dissector(void) +{ + int i, err, prog_fd, keys_fd = -1, tap_fd; + struct bpf_flow *skel; + + skel = bpf_flow__open_and_load(); + if (CHECK(!skel, "skel", "failed to open/load skeleton\n")) + return; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) + goto out_destroy_skel; + keys_fd = bpf_map__fd(skel->maps.last_dissection); + if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) + goto out_destroy_skel; + err = init_prog_array(skel->obj, skel->maps.jmp_table); + if (CHECK(err, "init_prog_array", "err %d\n", err)) + goto out_destroy_skel; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct bpf_flow_keys flow_keys; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &tests[i].pkt, + .data_size_in = sizeof(tests[i].pkt), + .data_out = &flow_keys, + ); + static struct bpf_flow_keys ctx = {}; + + if (tests[i].flags) { + topts.ctx_in = &ctx; + topts.ctx_size_in = sizeof(ctx); + ctx.flags = tests[i].flags; + } + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval"); + + /* check the resulting flow_keys only if BPF_OK returned */ + if (topts.retval != BPF_OK) + continue; + ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), + 
"test_run data_size_out"); + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + } + + /* Do the same tests but for skb-less flow dissector. + * We use a known path in the net/tun driver that calls + * eth_get_headlen and we manually export bpf_flow_keys + * via BPF map in this case. + */ + + tap_fd = create_tap("tap0"); + CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); + err = ifup("tap0"); + CHECK(err, "ifup", "err %d errno %d\n", err, errno); + + /* Test direct prog attachment */ + test_skb_less_prog_attach(skel, tap_fd); + /* Test indirect prog attachment via link */ + test_skb_less_link_create(skel, tap_fd); + + close(tap_fd); +out_destroy_skel: + bpf_flow__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c new file mode 100644 index 000000000..c7a47b57a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void serial_test_flow_dissector_load_bytes(void) +{ + struct bpf_flow_keys flow_keys; + struct bpf_insn prog[] = { + // BPF_REG_1 - 1st argument: context + // BPF_REG_2 - 2nd argument: offset, start at first byte + BPF_MOV64_IMM(BPF_REG_2, 0), + // BPF_REG_3 - 3rd argument: destination, reserve byte on stack + BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1), + // BPF_REG_4 - 4th argument: copy one byte + BPF_MOV64_IMM(BPF_REG_4, 1), + // bpf_skb_load_bytes(ctx, sizeof(pkt_v4), ptr, 1) + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_skb_load_bytes), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + // if (ret == 0) return BPF_DROP (2) + BPF_MOV64_IMM(BPF_REG_0, BPF_DROP), + BPF_EXIT_INSN(), + // if (ret != 0) return BPF_OK (0) + BPF_MOV64_IMM(BPF_REG_0, BPF_OK), + BPF_EXIT_INSN(), + }; + int fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = &flow_keys, + .data_size_out = sizeof(flow_keys), + .repeat = 1, + ); + + /* make sure bpf_skb_load_bytes is not allowed from skb-less context + */ + fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + ASSERT_GE(fd, 0, "bpf_test_load_program good fd"); + + err = bpf_prog_test_run_opts(fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), + "test_run data_size_out"); + ASSERT_EQ(topts.retval, BPF_OK, "test_run retval"); + + if (fd >= -1) + close(fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c new file mode 100644 index 000000000..7c79462d2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -0,0 +1,678 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Tests for attaching, detaching, and replacing flow_dissector BPF program. 
+ */ + +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <sched.h> +#include <stdbool.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <linux/bpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" + +static int init_net = -1; + +static __u32 query_attached_prog_id(int netns) +{ + __u32 prog_ids[1] = {}; + __u32 prog_cnt = ARRAY_SIZE(prog_ids); + int err; + + err = bpf_prog_query(netns, BPF_FLOW_DISSECTOR, 0, NULL, + prog_ids, &prog_cnt); + if (CHECK_FAIL(err)) { + perror("bpf_prog_query"); + return 0; + } + + return prog_cnt == 1 ? prog_ids[0] : 0; +} + +static bool prog_is_attached(int netns) +{ + return query_attached_prog_id(netns) > 0; +} + +static int load_prog(enum bpf_prog_type type) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, BPF_OK), + BPF_EXIT_INSN(), + }; + int fd; + + fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); + if (CHECK_FAIL(fd < 0)) + perror("bpf_test_load_program"); + + return fd; +} + +static __u32 query_prog_id(int prog) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int err; + + err = bpf_obj_get_info_by_fd(prog, &info, &info_len); + if (CHECK_FAIL(err || info_len != sizeof(info))) { + perror("bpf_obj_get_info_by_fd"); + return 0; + } + + return info.id; +} + +static int unshare_net(int old_net) +{ + int err, new_net; + + err = unshare(CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("unshare(CLONE_NEWNET)"); + return -1; + } + new_net = open("/proc/self/ns/net", O_RDONLY); + if (CHECK_FAIL(new_net < 0)) { + perror("open(/proc/self/ns/net)"); + setns(old_net, CLONE_NEWNET); + return -1; + } + return new_net; +} + +static void test_prog_attach_prog_attach(int netns, int prog1, int prog2) +{ + int err; + + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success when attaching a different program */ + err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach(prog2) #1"); + goto out_detach; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); + + /* Expect failure when attaching the same program twice */ + err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_prog_attach(prog2) #2"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); + +out_detach: + err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) + perror("bpf_prog_detach"); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_link_create(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int link1, link2; + + link1 = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link1 < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure creating link when another link exists */ + errno = 0; + link2 = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link2 >= 0 || errno != E2BIG)) + perror("bpf_prog_attach(prog2) expected E2BIG"); + if (link2 >= 0) + close(link2); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link1); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_prog_attach_link_create(int netns, int prog1, int prog2) +{ + 
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int err, link; + + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure creating link when prog attached */ + errno = 0; + link = bpf_link_create(prog2, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link >= 0 || errno != EEXIST)) + perror("bpf_link_create(prog2) expected EEXIST"); + if (link >= 0) + close(link); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) + perror("bpf_prog_detach"); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_prog_attach(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure attaching prog when link exists */ + errno = 0; + err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(!err || errno != EEXIST)) + perror("bpf_prog_attach(prog2) expected EEXIST"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_prog_detach(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure detaching prog when link exists */ + errno = 0; + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_prog_detach expected EINVAL"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_prog_attach_detach_query(int netns, int prog1, int prog2) +{ + int err; + + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); + if (CHECK_FAIL(err)) { + perror("bpf_prog_attach(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); + if (CHECK_FAIL(err)) { + perror("bpf_prog_detach"); + return; + } + + /* Expect no prog attached after successful detach */ + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_create_close_query(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); + int link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + /* Expect no prog attached after closing last link FD */ + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_no_old_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + 
} + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success replacing the prog when old prog not specified */ + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(err)) + perror("bpf_link_update"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_replace_old_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success F_REPLACE and old prog specified to succeed */ + update_opts.flags = BPF_F_REPLACE; + update_opts.old_prog_fd = prog1; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(err)) + perror("bpf_link_update"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_same_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success updating the prog with the same one */ + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog1, &update_opts); + if (CHECK_FAIL(err)) + perror("bpf_link_update"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_invalid_opts(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect update to fail w/ old prog FD but w/o F_REPLACE*/ + errno = 0; + update_opts.flags = 0; + update_opts.old_prog_fd = prog1; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != EINVAL)) { + perror("bpf_link_update expected EINVAL"); + goto out_close; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect update to fail on old prog FD mismatch */ + errno = 0; + update_opts.flags = BPF_F_REPLACE; + update_opts.old_prog_fd = prog2; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != EPERM)) { + perror("bpf_link_update expected EPERM"); + goto out_close; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect update to fail for invalid old prog FD */ + errno = 0; + update_opts.flags = BPF_F_REPLACE; + update_opts.old_prog_fd = -1; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != EBADF)) { + perror("bpf_link_update expected EBADF"); + goto out_close; + } + 
CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect update to fail with invalid flags */ + errno = 0; + update_opts.flags = BPF_F_ALLOW_MULTI; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_link_update expected EINVAL"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + +out_close: + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_invalid_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link, prog3; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect failure when new prog FD is not valid */ + errno = 0; + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, -1, &update_opts); + if (CHECK_FAIL(!err || errno != EBADF)) { + perror("bpf_link_update expected EINVAL"); + goto out_close_link; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + prog3 = load_prog(BPF_PROG_TYPE_SOCKET_FILTER); + if (prog3 < 0) + goto out_close_link; + + /* Expect failure when new prog FD type doesn't match */ + errno = 0; + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog3, &update_opts); + if (CHECK_FAIL(!err || errno != EINVAL)) + perror("bpf_link_update expected EINVAL"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(prog3); +out_close_link: + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + +static void test_link_update_netns_gone(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link, old_net; + + old_net = netns; + netns = unshare_net(old_net); + if (netns < 0) + return; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(netns); + err = setns(old_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(CLONE_NEWNET)"); + close(link); + return; + } + + /* Expect failure when netns destroyed */ + errno = 0; + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(!err || errno != ENOLINK)) + perror("bpf_link_update"); + + close(link); +} + +static void test_link_get_info(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + struct bpf_link_info info = {}; + struct stat netns_stat = {}; + __u32 info_len, link_id; + int err, link, old_net; + + old_net = netns; + netns = unshare_net(old_net); + if (netns < 0) + return; + + err = fstat(netns, &netns_stat); + if (CHECK_FAIL(err)) { + perror("stat(netns)"); + goto out_resetns; + } + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + goto out_resetns; + } + + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + perror("bpf_obj_get_info"); + goto 
out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect link info to be sane and match prog and netns details */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id == 0); + CHECK_FAIL(info.prog_id != query_prog_id(prog1)); + CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, prog2, &update_opts); + if (CHECK_FAIL(err)) { + perror("bpf_link_update(prog2)"); + goto out_unlink; + } + + link_id = info.id; + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + perror("bpf_obj_get_info"); + goto out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect no info change after update except in prog id */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id != link_id); + CHECK_FAIL(info.prog_id != query_prog_id(prog2)); + CHECK_FAIL(info.netns.netns_ino != netns_stat.st_ino); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + + /* Leave netns link is attached to and close last FD to it */ + err = setns(old_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(NEWNET)"); + goto out_unlink; + } + close(netns); + old_net = -1; + netns = -1; + + info_len = sizeof(info); + err = bpf_obj_get_info_by_fd(link, &info, &info_len); + if (CHECK_FAIL(err)) { + perror("bpf_obj_get_info"); + goto out_unlink; + } + CHECK_FAIL(info_len != sizeof(info)); + + /* Expect netns_ino to change to 0 */ + CHECK_FAIL(info.type != BPF_LINK_TYPE_NETNS); + CHECK_FAIL(info.id != link_id); + CHECK_FAIL(info.prog_id != query_prog_id(prog2)); + CHECK_FAIL(info.netns.netns_ino != 0); + CHECK_FAIL(info.netns.attach_type != BPF_FLOW_DISSECTOR); + +out_unlink: + close(link); +out_resetns: + if (old_net != -1) + setns(old_net, CLONE_NEWNET); + if (netns != -1) + close(netns); +} + +static void run_tests(int netns) +{ + struct test { + const char *test_name; + void (*test_func)(int netns, int prog1, int prog2); + } tests[] = { + { "prog attach, prog attach", + test_prog_attach_prog_attach }, + { "link create, link create", + test_link_create_link_create }, + { "prog attach, link create", + test_prog_attach_link_create }, + { "link create, prog attach", + test_link_create_prog_attach }, + { "link create, prog detach", + test_link_create_prog_detach }, + { "prog attach, detach, query", + test_prog_attach_detach_query }, + { "link create, close, query", + test_link_create_close_query }, + { "link update no old prog", + test_link_update_no_old_prog }, + { "link update with replace old prog", + test_link_update_replace_old_prog }, + { "link update with same prog", + test_link_update_same_prog }, + { "link update invalid opts", + test_link_update_invalid_opts }, + { "link update invalid prog", + test_link_update_invalid_prog }, + { "link update netns gone", + test_link_update_netns_gone }, + { "link get info", + test_link_get_info }, + }; + int i, progs[2] = { -1, -1 }; + char test_name[80]; + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + progs[i] = load_prog(BPF_PROG_TYPE_FLOW_DISSECTOR); + if (progs[i] < 0) + goto out_close; + } + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + snprintf(test_name, sizeof(test_name), + "flow dissector %s%s", + tests[i].test_name, + netns == init_net ? 
" (init_net)" : ""); + if (test__start_subtest(test_name)) + tests[i].test_func(netns, progs[0], progs[1]); + } +out_close: + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (progs[i] >= 0) + CHECK_FAIL(close(progs[i])); + } +} + +void serial_test_flow_dissector_reattach(void) +{ + int err, new_net, saved_net; + + saved_net = open("/proc/self/ns/net", O_RDONLY); + if (CHECK_FAIL(saved_net < 0)) { + perror("open(/proc/self/ns/net"); + return; + } + + init_net = open("/proc/1/ns/net", O_RDONLY); + if (CHECK_FAIL(init_net < 0)) { + perror("open(/proc/1/ns/net)"); + goto out_close; + } + + err = setns(init_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) { + perror("setns(/proc/1/ns/net)"); + goto out_close; + } + + if (prog_is_attached(init_net)) { + test__skip(); + printf("Can't test with flow dissector attached to init_net\n"); + goto out_setns; + } + + /* First run tests in root network namespace */ + run_tests(init_net); + + /* Then repeat tests in a non-root namespace */ + new_net = unshare_net(init_net); + if (new_net < 0) + goto out_setns; + run_tests(new_net); + close(new_net); + +out_setns: + /* Move back to netns we started in. */ + err = setns(saved_net, CLONE_NEWNET); + if (CHECK_FAIL(err)) + perror("setns(/proc/self/ns/net)"); + +out_close: + close(init_net); + close(saved_net); +} diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c new file mode 100644 index 000000000..8963f8a54 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/for_each.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <network_helpers.h> +#include "for_each_hash_map_elem.skel.h" +#include "for_each_array_map_elem.skel.h" +#include "for_each_map_elem_write_key.skel.h" + +static unsigned int duration; + +static void test_hash_map(void) +{ + int i, err, max_entries; + struct for_each_hash_map_elem *skel; + __u64 *percpu_valbuf = NULL; + size_t percpu_val_sz; + __u32 key, num_cpus; + __u64 val; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = for_each_hash_map_elem__open_and_load(); + if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) + return; + + max_entries = bpf_map__max_entries(skel->maps.hashmap); + for (i = 0; i < max_entries; i++) { + key = i; + val = i + 1; + err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + num_cpus = bpf_num_possible_cpus(); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); + if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) + goto out; + + key = 1; + for (i = 0; i < num_cpus; i++) + percpu_valbuf[i] = i + 1; + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); + if (!ASSERT_OK(err, "percpu_map_update")) + goto out; + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); + duration = topts.duration; + if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", + err, errno, topts.retval)) + goto out; + + ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output"); + ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems"); + + key = 1; + err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0); + ASSERT_ERR(err, "hashmap_lookup"); + + ASSERT_EQ(skel->bss->percpu_called, 1, 
"percpu_called"); + ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus"); + ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems"); + ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key"); + ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val"); + ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output"); +out: + free(percpu_valbuf); + for_each_hash_map_elem__destroy(skel); +} + +static void test_array_map(void) +{ + __u32 key, num_cpus, max_entries; + int i, err; + struct for_each_array_map_elem *skel; + __u64 *percpu_valbuf = NULL; + size_t percpu_val_sz; + __u64 val, expected_total; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = for_each_array_map_elem__open_and_load(); + if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load")) + return; + + expected_total = 0; + max_entries = bpf_map__max_entries(skel->maps.arraymap); + for (i = 0; i < max_entries; i++) { + key = i; + val = i + 1; + /* skip the last iteration for expected total */ + if (i != max_entries - 1) + expected_total += val; + err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + num_cpus = bpf_num_possible_cpus(); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); + if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) + goto out; + + key = 0; + for (i = 0; i < num_cpus; i++) + percpu_valbuf[i] = i + 1; + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); + if (!ASSERT_OK(err, "percpu_map_update")) + goto out; + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); + duration = topts.duration; + if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", + err, errno, topts.retval)) + goto out; + + ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output"); + ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val"); + +out: + free(percpu_valbuf); + for_each_array_map_elem__destroy(skel); +} + +static void test_write_map_key(void) +{ + struct for_each_map_elem_write_key *skel; + + skel = for_each_map_elem_write_key__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load")) + for_each_map_elem_write_key__destroy(skel); +} + +void test_for_each(void) +{ + if (test__start_subtest("hash_map")) + test_hash_map(); + if (test__start_subtest("array_map")) + test_array_map(); + if (test__start_subtest("write_map_key")) + test_write_map_key(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c new file mode 100644 index 000000000..3948da12a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "get_branch_snapshot.skel.h" + +static int *pfd_array; +static int cpu_cnt; + +static bool is_hypervisor(void) +{ + char *line = NULL; + bool ret = false; + size_t len; + FILE *fp; + + fp = fopen("/proc/cpuinfo", "r"); + if (!fp) + return false; + + while (getline(&line, &len, fp) != -1) { + if (!strncmp(line, "flags", 5)) { + if (strstr(line, "hypervisor") != NULL) + ret = true; + break; + } + } + + free(line); + fclose(fp); + return ret; +} + +static int create_perf_events(void) +{ + struct perf_event_attr attr = {0}; + int 
cpu; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_RAW; + attr.config = 0x1b00; + attr.sample_type = PERF_SAMPLE_BRANCH_STACK; + attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL | + PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; + + cpu_cnt = libbpf_num_possible_cpus(); + pfd_array = malloc(sizeof(int) * cpu_cnt); + if (!pfd_array) { + cpu_cnt = 0; + return 1; + } + + for (cpu = 0; cpu < cpu_cnt; cpu++) { + pfd_array[cpu] = syscall(__NR_perf_event_open, &attr, + -1, cpu, -1, PERF_FLAG_FD_CLOEXEC); + if (pfd_array[cpu] < 0) + break; + } + + return cpu == 0; +} + +static void close_perf_events(void) +{ + int cpu, fd; + + for (cpu = 0; cpu < cpu_cnt; cpu++) { + fd = pfd_array[cpu]; + if (fd < 0) + break; + close(fd); + } + free(pfd_array); +} + +void serial_test_get_branch_snapshot(void) +{ + struct get_branch_snapshot *skel = NULL; + int err; + + /* Skip the test before we fix LBR snapshot for hypervisor. */ + if (is_hypervisor()) { + test__skip(); + return; + } + + if (create_perf_events()) { + test__skip(); /* system doesn't support LBR */ + goto cleanup; + } + + skel = get_branch_snapshot__open_and_load(); + if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load")) + goto cleanup; + + err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low); + if (!ASSERT_OK(err, "kallsyms_find")) + goto cleanup; + + /* Just a guess for the end of this function, as module functions + * in /proc/kallsyms could come in any order. + */ + skel->bss->address_high = skel->bss->address_low + 128; + + err = get_branch_snapshot__attach(skel); + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) + goto cleanup; + + trigger_module_test_read(100); + + if (skel->bss->total_entries < 16) { + /* too few entries for the hit/waste test */ + test__skip(); + goto cleanup; + } + + ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); + + /* Given we stop LBR in software, we will waste a few entries. + * But we should try to waste as few as possible entries. We are at + * about 7 on x86_64 systems. + * Add a check for < 10 so that we get heads-up when something + * changes and wastes too many entries. + */ + ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries"); + +cleanup: + get_branch_snapshot__destroy(skel); + close_perf_events(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c new file mode 100644 index 000000000..28cf63963 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "get_func_args_test.skel.h" + +void test_get_func_args_test(void) +{ + struct get_func_args_test *skel = NULL; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + skel = get_func_args_test__open_and_load(); + if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load")) + return; + + err = get_func_args_test__attach(skel); + if (!ASSERT_OK(err, "get_func_args_test__attach")) + goto cleanup; + + /* This runs bpf_fentry_test* functions and triggers + * fentry/fexit programs. + */ + prog_fd = bpf_program__fd(skel->progs.test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + /* This runs bpf_modify_return_test function and triggers + * fmod_ret_test and fexit_test programs. 
+ */ + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 1234, "test_run"); + + ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); + ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); + ASSERT_EQ(skel->bss->test3_result, 1, "test3_result"); + ASSERT_EQ(skel->bss->test4_result, 1, "test4_result"); + +cleanup: + get_func_args_test__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c new file mode 100644 index 000000000..fede8ef58 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "get_func_ip_test.skel.h" + +static void test_function_entry(void) +{ + struct get_func_ip_test *skel = NULL; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + skel = get_func_ip_test__open(); + if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) + return; + + err = get_func_ip_test__load(skel); + if (!ASSERT_OK(err, "get_func_ip_test__load")) + goto cleanup; + + err = get_func_ip_test__attach(skel); + if (!ASSERT_OK(err, "get_func_ip_test__attach")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + prog_fd = bpf_program__fd(skel->progs.test5); + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "test_run"); + + ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); + ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); + ASSERT_EQ(skel->bss->test3_result, 1, "test3_result"); + ASSERT_EQ(skel->bss->test4_result, 1, "test4_result"); + ASSERT_EQ(skel->bss->test5_result, 1, "test5_result"); + +cleanup: + get_func_ip_test__destroy(skel); +} + +/* test6 is x86_64 specific because of the instruction + * offset, disabling it for all other archs + */ +#ifdef __x86_64__ +static void test_function_body(void) +{ + struct get_func_ip_test *skel = NULL; + LIBBPF_OPTS(bpf_test_run_opts, topts); + LIBBPF_OPTS(bpf_kprobe_opts, kopts); + struct bpf_link *link6 = NULL; + int err, prog_fd; + + skel = get_func_ip_test__open(); + if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) + return; + + bpf_program__set_autoload(skel->progs.test6, true); + + err = get_func_ip_test__load(skel); + if (!ASSERT_OK(err, "get_func_ip_test__load")) + goto cleanup; + + kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 
9 : 5; + + link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts); + if (!ASSERT_OK_PTR(link6, "link6")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->test6_result, 1, "test6_result"); + +cleanup: + bpf_link__destroy(link6); + get_func_ip_test__destroy(skel); +} +#else +#define test_function_body() +#endif + +void test_get_func_ip_test(void) +{ + test_function_entry(); + test_function_body(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c new file mode 100644 index 000000000..858e0575f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/socket.h> +#include <test_progs.h> + +#define MAX_CNT_RAWTP 10ull +#define MAX_STACK_RAWTP 100 + +static int duration = 0; + +struct get_stack_trace_t { + int pid; + int kern_stack_size; + int user_stack_size; + int user_stack_buildid_size; + __u64 kern_stack[MAX_STACK_RAWTP]; + __u64 user_stack[MAX_STACK_RAWTP]; + struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; +}; + +static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) +{ + bool good_kern_stack = false, good_user_stack = false; + const char *nonjit_func = "___bpf_prog_run"; + /* perfbuf-submitted data is 4-byte aligned, but we need 8-byte + * alignment, so copy data into a local variable, for simplicity + */ + struct get_stack_trace_t e; + int i, num_stack; + struct ksym *ks; + + memset(&e, 0, sizeof(e)); + memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e)); + + if (size < sizeof(struct get_stack_trace_t)) { + __u64 *raw_data = data; + bool found = false; + + num_stack = size / sizeof(__u64); + /* If jit is enabled, we do not have a good way to + * verify the sanity of the kernel stack. So we + * just assume it is good if the stack is not empty. + * This could be improved in the future. 
+ */ + if (env.jit_enabled) { + found = num_stack > 0; + } else { + for (i = 0; i < num_stack; i++) { + ks = ksym_search(raw_data[i]); + if (ks && (strcmp(ks->name, nonjit_func) == 0)) { + found = true; + break; + } + } + } + if (found) { + good_kern_stack = true; + good_user_stack = true; + } + } else { + num_stack = e.kern_stack_size / sizeof(__u64); + if (env.jit_enabled) { + good_kern_stack = num_stack > 0; + } else { + for (i = 0; i < num_stack; i++) { + ks = ksym_search(e.kern_stack[i]); + if (ks && (strcmp(ks->name, nonjit_func) == 0)) { + good_kern_stack = true; + break; + } + } + } + if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0) + good_user_stack = true; + } + + if (!good_kern_stack) + CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n"); + if (!good_user_stack) + CHECK(!good_user_stack, "user_stack", "corrupted user stack\n"); +} + +void test_get_stack_raw_tp(void) +{ + const char *file = "./test_get_stack_rawtp.bpf.o"; + const char *file_err = "./test_get_stack_rawtp_err.bpf.o"; + const char *prog_name = "bpf_prog1"; + int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP; + struct perf_buffer *pb = NULL; + struct bpf_link *link = NULL; + struct timespec tv = {0, 10}; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_map *map; + cpu_set_t cpu_set; + + err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + goto close_prog; + + map = bpf_object__find_map_by_name(obj, "perfmap"); + if (CHECK(!map, "bpf_find_map", "not found\n")) + goto close_prog; + + err = load_kallsyms(); + if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno)) + goto close_prog; + + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno)) + goto close_prog; + + link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + if (!ASSERT_OK_PTR(link, "attach_raw_tp")) + goto close_prog; + + pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output, + NULL, NULL, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buf__new")) + goto close_prog; + + /* trigger some syscall action */ + for (i = 0; i < MAX_CNT_RAWTP; i++) + nanosleep(&tv, NULL); + + while (exp_cnt > 0) { + err = perf_buffer__poll(pb, 100); + if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err)) + goto close_prog; + exp_cnt -= err; + } + +close_prog: + bpf_link__destroy(link); + perf_buffer__free(pb); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c new file mode 100644 index 000000000..2715c6830 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include <test_progs.h> +#include "test_stacktrace_build_id.skel.h" + +void test_get_stackid_cannot_attach(void) +{ + struct perf_event_attr attr = { + /* .type = PERF_TYPE_SOFTWARE, */ + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .precise_ip = 1, + 
.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK, + .branch_sample_type = PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_NO_FLAGS | + PERF_SAMPLE_BRANCH_NO_CYCLES | + PERF_SAMPLE_BRANCH_CALL_STACK, + .sample_period = 5000, + .size = sizeof(struct perf_event_attr), + }; + struct test_stacktrace_build_id *skel; + __u32 duration = 0; + int pmu_fd, err; + + skel = test_stacktrace_build_id__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + /* override program type */ + bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT); + + err = test_stacktrace_build_id__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) { + printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n", + __func__); + test__skip(); + goto cleanup; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain"); + close(pmu_fd); + + /* add PERF_SAMPLE_CALLCHAIN, attach should succeed */ + attr.sample_type |= PERF_SAMPLE_CALLCHAIN; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain"); + bpf_link__destroy(skel->links.oncpu); + close(pmu_fd); + + /* add exclude_callchain_kernel, attach should fail */ + attr.exclude_callchain_kernel = 1; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel"); + close(pmu_fd); + +cleanup: + test_stacktrace_build_id__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c new file mode 100644 index 000000000..fadfb64e2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_global_data_number(struct bpf_object *obj, __u32 duration) +{ + int i, err, map_fd; + __u64 num; + + map_fd = bpf_find_map(__func__, obj, "result_number"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + __u64 num; + } tests[] = { + { "relocate .bss reference", 0, 0 }, + { "relocate .data reference", 1, 42 }, + { "relocate .rodata reference", 2, 24 }, + { "relocate .bss reference", 3, 0 }, + { "relocate .data reference", 4, 0xffeeff }, + { "relocate .rodata reference", 5, 0xabab }, + { "relocate .bss reference", 6, 1234 }, + { "relocate .bss reference", 7, 0 }, + { "relocate .rodata reference", 8, 0xab }, + { "relocate .rodata reference", 9, 0x1111111111111111 }, + { "relocate .rodata reference", 10, ~0 }, + }; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + 
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); + CHECK(err || num != tests[i].num, tests[i].name, + "err %d result %llx expected %llx\n", + err, num, tests[i].num); + } +} + +static void test_global_data_string(struct bpf_object *obj, __u32 duration) +{ + int i, err, map_fd; + char str[32]; + + map_fd = bpf_find_map(__func__, obj, "result_string"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + char str[32]; + } tests[] = { + { "relocate .rodata reference", 0, "abcdefghijklmnopqrstuvwxyz" }, + { "relocate .data reference", 1, "abcdefghijklmnopqrstuvwxyz" }, + { "relocate .bss reference", 2, "" }, + { "relocate .data reference", 3, "abcdexghijklmnopqrstuvwxyz" }, + { "relocate .bss reference", 4, "\0\0hello" }, + }; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + err = bpf_map_lookup_elem(map_fd, &tests[i].key, str); + CHECK(err || memcmp(str, tests[i].str, sizeof(str)), + tests[i].name, "err %d result \'%s\' expected \'%s\'\n", + err, str, tests[i].str); + } +} + +struct foo { + __u8 a; + __u32 b; + __u64 c; +}; + +static void test_global_data_struct(struct bpf_object *obj, __u32 duration) +{ + int i, err, map_fd; + struct foo val; + + map_fd = bpf_find_map(__func__, obj, "result_struct"); + if (CHECK_FAIL(map_fd < 0)) + return; + + struct { + char *name; + uint32_t key; + struct foo val; + } tests[] = { + { "relocate .rodata reference", 0, { 42, 0xfefeefef, 0x1111111111111111ULL, } }, + { "relocate .bss reference", 1, { } }, + { "relocate .rodata reference", 2, { } }, + { "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } }, + }; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val); + CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)), + tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n", + err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c); + } +} + +static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) +{ + int err = -ENOMEM, map_fd, zero = 0; + struct bpf_map *map, *map2; + __u8 *buff; + + map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); + if (!ASSERT_OK_PTR(map, "map")) + return; + if (!ASSERT_TRUE(bpf_map__is_internal(map), "is_internal")) + return; + + /* ensure we can lookup internal maps by their ELF names */ + map2 = bpf_object__find_map_by_name(obj, ".rodata"); + if (!ASSERT_EQ(map, map2, "same_maps")) + return; + + map_fd = bpf_map__fd(map); + if (CHECK_FAIL(map_fd < 0)) + return; + + buff = malloc(bpf_map__value_size(map)); + if (buff) + err = bpf_map_update_elem(map_fd, &zero, buff, 0); + free(buff); + CHECK(!err || errno != EPERM, "test .rodata read-only map", + "err %d errno %d\n", err, errno); +} + +void test_global_data(void) +{ + const char *file = "./test_global_data.bpf.o"; + struct bpf_object *obj; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (!ASSERT_OK(err, "load program")) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "pass global data run err"); + ASSERT_OK(topts.retval, "pass global data run retval"); + + test_global_data_number(obj, topts.duration); + test_global_data_string(obj, topts.duration); + test_global_data_struct(obj, topts.duration); + test_global_data_rdonly(obj, topts.duration); + + bpf_object__close(obj); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c new file mode 100644 index 000000000..8466332d7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_global_data_init(void) +{ + const char *file = "./test_global_data.bpf.o"; + int err = -ENOMEM, map_fd, zero = 0; + __u8 *buff = NULL, *newval = NULL; + struct bpf_object *obj; + struct bpf_map *map; + __u32 duration = 0; + size_t sz; + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK_FAIL(err)) + return; + + map = bpf_object__find_map_by_name(obj, ".rodata"); + if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) + goto out; + + sz = bpf_map__value_size(map); + newval = malloc(sz); + if (CHECK_FAIL(!newval)) + goto out; + + memset(newval, 0, sz); + /* wrong size, should fail */ + err = bpf_map__set_initial_value(map, newval, sz - 1); + if (CHECK(!err, "reject set initial value wrong size", "err %d\n", err)) + goto out; + + err = bpf_map__set_initial_value(map, newval, sz); + if (CHECK(err, "set initial value", "err %d\n", err)) + goto out; + + err = bpf_object__load(obj); + if (CHECK_FAIL(err)) + goto out; + + map_fd = bpf_map__fd(map); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + buff = malloc(sz); + if (buff) + err = bpf_map_lookup_elem(map_fd, &zero, buff); + if (CHECK(!buff || err || memcmp(buff, newval, sz), + "compare .rodata map data override", + "err %d errno %d\n", err, errno)) + goto out; + + memset(newval, 1, sz); + /* object loaded - should fail */ + err = bpf_map__set_initial_value(map, newval, sz); + CHECK(!err, "reject set initial value after load", "err %d\n", err); +out: + free(buff); + free(newval); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c new file mode 100644 index 000000000..d997099f6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "test_progs.h" +#include "network_helpers.h" + +static __u32 duration; + +static void test_global_func_args0(struct bpf_object *obj) +{ + int err, i, map_fd, actual_value; + const char *map_name = "values"; + + map_fd = bpf_find_map(__func__, obj, map_name); + if (CHECK(map_fd < 0, "bpf_find_map", "cannot find BPF map %s: %s\n", + map_name, strerror(errno))) + return; + + struct { + const char *descr; + int expected_value; + } tests[] = { + {"passing NULL pointer", 0}, + {"returning value", 1}, + {"reading local variable", 100 }, + {"writing local variable", 101 }, + {"reading global variable", 42 }, + {"writing global variable", 43 }, + {"writing to pointer-to-pointer", 1 }, + }; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + const int expected_value = tests[i].expected_value; + + err = bpf_map_lookup_elem(map_fd, &i, &actual_value); + + CHECK(err || actual_value != expected_value, tests[i].descr, + "err %d result %d expected %d\n", err, actual_value, expected_value); + } +} + +void test_global_func_args(void) +{ + const char *file = "./test_global_func_args.bpf.o"; + struct bpf_object *obj; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + if (CHECK(err, "load program", "error %d loading %s\n", err, 
file)) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + + test_global_func_args0(obj); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/hash_large_key.c b/tools/testing/selftests/bpf/prog_tests/hash_large_key.c new file mode 100644 index 000000000..34684c0fc --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/hash_large_key.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "test_hash_large_key.skel.h" + +void test_hash_large_key(void) +{ + int err, value = 21, duration = 0, hash_map_fd; + struct test_hash_large_key *skel; + + struct bigelement { + int a; + char b[4096]; + long long c; + } key; + bzero(&key, sizeof(key)); + + skel = test_hash_large_key__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) + return; + + hash_map_fd = bpf_map__fd(skel->maps.hash_map); + if (CHECK(hash_map_fd < 0, "bpf_map__fd", "failed\n")) + goto cleanup; + + err = test_hash_large_key__attach(skel); + if (CHECK(err, "attach_raw_tp", "err %d\n", err)) + goto cleanup; + + err = bpf_map_update_elem(hash_map_fd, &key, &value, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "errno=%d\n", errno)) + goto cleanup; + + key.c = 1; + err = bpf_map_lookup_elem(hash_map_fd, &key, &value); + if (CHECK(err, "bpf_map_lookup_elem", "errno=%d\n", errno)) + goto cleanup; + + CHECK_FAIL(value != 42); + +cleanup: + test_hash_large_key__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/hashmap.c b/tools/testing/selftests/bpf/prog_tests/hashmap.c new file mode 100644 index 000000000..4747ab18f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/hashmap.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * Tests for libbpf's hashmap. 
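+ *
+ * The suite covers add/set/update/find/delete, both iteration macros,
+ * capacity growth, forced-collision multimaps and empty-map behaviour.
+ * A minimal usage sketch, relying on the hash/equal callbacks defined
+ * below in this file:
+ *
+ *   struct hashmap *m = hashmap__new(hash_fn, equal_fn, NULL);
+ *   int err = hashmap__add(m, (void *)1, (void *)100);
+ *   hashmap__free(m);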
+ * + * Copyright (c) 2019 Facebook + */ +#include "test_progs.h" +#include "bpf/hashmap.h" + +static int duration = 0; + +static size_t hash_fn(const void *k, void *ctx) +{ + return (long)k; +} + +static bool equal_fn(const void *a, const void *b, void *ctx) +{ + return (long)a == (long)b; +} + +static inline size_t next_pow_2(size_t n) +{ + size_t r = 1; + + while (r < n) + r <<= 1; + return r; +} + +static inline size_t exp_cap(size_t sz) +{ + size_t r = next_pow_2(sz); + + if (sz * 4 / 3 > r) + r <<= 1; + return r; +} + +#define ELEM_CNT 62 + +static void test_hashmap_generic(void) +{ + struct hashmap_entry *entry, *tmp; + int err, bkt, found_cnt, i; + long long found_msk; + struct hashmap *map; + + map = hashmap__new(hash_fn, equal_fn, NULL); + if (!ASSERT_OK_PTR(map, "hashmap__new")) + return; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(1024 + i); + + err = hashmap__update(map, k, v, &oldk, &oldv); + if (CHECK(err != -ENOENT, "hashmap__update", + "unexpected result: %d\n", err)) + goto cleanup; + + if (i % 2) { + err = hashmap__add(map, k, v); + } else { + err = hashmap__set(map, k, v, &oldk, &oldv); + if (CHECK(oldk != NULL || oldv != NULL, "check_kv", + "unexpected k/v: %p=%p\n", oldk, oldv)) + goto cleanup; + } + + if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + goto cleanup; + + if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", + "failed to find key %ld\n", (long)k)) + goto cleanup; + if (CHECK(oldv != v, "elem_val", + "found value is wrong: %ld\n", (long)oldv)) + goto cleanup; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size", + "invalid map size: %zu\n", hashmap__size(map))) + goto cleanup; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "hashmap_cap", + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + goto cleanup; + + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 1024, "check_kv", + "invalid k/v pair: %ld = %ld\n", k, v)) + goto cleanup; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt", + "not all keys iterated: %llx\n", found_msk)) + goto cleanup; + + for (i = 0; i < ELEM_CNT; i++) { + const void *oldk, *k = (const void *)(long)i; + void *oldv, *v = (void *)(long)(256 + i); + + err = hashmap__add(map, k, v); + if (CHECK(err != -EEXIST, "hashmap__add", + "unexpected add result: %d\n", err)) + goto cleanup; + + if (i % 2) + err = hashmap__update(map, k, v, &oldk, &oldv); + else + err = hashmap__set(map, k, v, &oldk, &oldv); + + if (CHECK(err, "elem_upd", + "failed to update k/v %ld = %ld: %d\n", + (long)k, (long)v, err)) + goto cleanup; + if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", + "failed to find key %ld\n", (long)k)) + goto cleanup; + if (CHECK(oldv != v, "elem_val", + "found value is wrong: %ld\n", (long)oldv)) + goto cleanup; + } + + if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size", + "invalid updated map size: %zu\n", hashmap__size(map))) + goto cleanup; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "hashmap__capacity", + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + goto cleanup; + + found_msk = 0; + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + long k = (long)entry->key; + long v = (long)entry->value; + + found_msk |= 1ULL << k; + if (CHECK(v - k != 256, "elem_check", + "invalid updated k/v pair: 
%ld = %ld\n", k, v)) + goto cleanup; + } + if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt", + "not all keys iterated after update: %llx\n", found_msk)) + goto cleanup; + + found_cnt = 0; + hashmap__for_each_key_entry(map, entry, (void *)0) { + found_cnt++; + } + if (CHECK(!found_cnt, "found_cnt", + "didn't find any entries for key 0\n")) + goto cleanup; + + found_msk = 0; + found_cnt = 0; + hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + goto cleanup; + if (CHECK(oldk != k || oldv != v, "check_old", + "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + goto cleanup; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", + "unexpectedly deleted k/v %ld = %ld\n", + (long)oldk, (long)oldv)) + goto cleanup; + } + + if (CHECK(!found_cnt || !found_msk, "found_entries", + "didn't delete any key entries\n")) + goto cleanup; + if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, "elem_cnt", + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + goto cleanup; + if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), + "hashmap__capacity", + "unexpected map capacity: %zu\n", hashmap__capacity(map))) + goto cleanup; + + hashmap__for_each_entry_safe(map, entry, tmp, bkt) { + const void *oldk, *k; + void *oldv, *v; + + k = entry->key; + v = entry->value; + + found_cnt++; + found_msk |= 1ULL << (long)k; + + if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", + "failed to delete k/v %ld = %ld\n", + (long)k, (long)v)) + goto cleanup; + if (CHECK(oldk != k || oldv != v, "elem_check", + "invalid old k/v: expect %ld = %ld, got %ld = %ld\n", + (long)k, (long)v, (long)oldk, (long)oldv)) + goto cleanup; + if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", + "unexpectedly deleted k/v %ld = %ld\n", + (long)k, (long)v)) + goto cleanup; + } + + if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1, + "found_cnt", + "not all keys were deleted: found_cnt:%d, found_msk:%llx\n", + found_cnt, found_msk)) + goto cleanup; + if (CHECK(hashmap__size(map) != 0, "hashmap__size", + "invalid updated map size (already deleted: %d): %zu\n", + found_cnt, hashmap__size(map))) + goto cleanup; + + found_cnt = 0; + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "elem_exists", + "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + goto cleanup; + } + + hashmap__clear(map); + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "elem_exists", + "unexpected map entries left: %ld = %ld\n", + (long)entry->key, (long)entry->value); + goto cleanup; + } + +cleanup: + hashmap__free(map); +} + +static size_t collision_hash_fn(const void *k, void *ctx) +{ + return 0; +} + +static void test_hashmap_multimap(void) +{ + void *k1 = (void *)0, *k2 = (void *)1; + struct hashmap_entry *entry; + struct hashmap *map; + long found_msk; + int err, bkt; + + /* force collisions */ + map = hashmap__new(collision_hash_fn, equal_fn, NULL); + if (!ASSERT_OK_PTR(map, "hashmap__new")) + return; + + /* set up multimap: + * [0] -> 1, 2, 4; + * [1] -> 8, 16, 32; + */ + err = hashmap__append(map, k1, (void *)1); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + err 
= hashmap__append(map, k1, (void *)2); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + err = hashmap__append(map, k1, (void *)4); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + + err = hashmap__append(map, k2, (void *)8); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + err = hashmap__append(map, k2, (void *)16); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + err = hashmap__append(map, k2, (void *)32); + if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) + goto cleanup; + + if (CHECK(hashmap__size(map) != 6, "hashmap_size", + "invalid map size: %zu\n", hashmap__size(map))) + goto cleanup; + + /* verify global iteration still works and sees all values */ + found_msk = 0; + hashmap__for_each_entry(map, entry, bkt) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 << 6) - 1, "found_msk", + "not all keys iterated: %lx\n", found_msk)) + goto cleanup; + + /* iterate values for key 1 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k1) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (1 | 2 | 4), "found_msk", + "invalid k1 values: %lx\n", found_msk)) + goto cleanup; + + /* iterate values for key 2 */ + found_msk = 0; + hashmap__for_each_key_entry(map, entry, k2) { + found_msk |= (long)entry->value; + } + if (CHECK(found_msk != (8 | 16 | 32), "found_msk", + "invalid k2 values: %lx\n", found_msk)) + goto cleanup; + +cleanup: + hashmap__free(map); +} + +static void test_hashmap_empty() +{ + struct hashmap_entry *entry; + int bkt; + struct hashmap *map; + void *k = (void *)0; + + /* force collisions */ + map = hashmap__new(hash_fn, equal_fn, NULL); + if (!ASSERT_OK_PTR(map, "hashmap__new")) + goto cleanup; + + if (CHECK(hashmap__size(map) != 0, "hashmap__size", + "invalid map size: %zu\n", hashmap__size(map))) + goto cleanup; + if (CHECK(hashmap__capacity(map) != 0, "hashmap__capacity", + "invalid map capacity: %zu\n", hashmap__capacity(map))) + goto cleanup; + if (CHECK(hashmap__find(map, k, NULL), "elem_find", + "unexpected find\n")) + goto cleanup; + if (CHECK(hashmap__delete(map, k, NULL, NULL), "elem_del", + "unexpected delete\n")) + goto cleanup; + + hashmap__for_each_entry(map, entry, bkt) { + CHECK(false, "elem_found", "unexpected iterated entry\n"); + goto cleanup; + } + hashmap__for_each_key_entry(map, entry, k) { + CHECK(false, "key_found", "unexpected key entry\n"); + goto cleanup; + } + +cleanup: + hashmap__free(map); +} + +void test_hashmap() +{ + if (test__start_subtest("generic")) + test_hashmap_generic(); + if (test__start_subtest("multimap")) + test_hashmap_multimap(); + if (test__start_subtest("empty")) + test_hashmap_empty(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c new file mode 100644 index 000000000..0354f9b82 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "test_helper_restricted.skel.h" + +void test_helper_restricted(void) +{ + int prog_i = 0, prog_cnt; + + do { + struct test_helper_restricted *test; + int err; + + test = test_helper_restricted__open(); + if (!ASSERT_OK_PTR(test, "open")) + return; + + prog_cnt = test->skeleton->prog_cnt; + + for (int j = 0; j < prog_cnt; ++j) { + struct bpf_program *prog = *test->skeleton->progs[j].prog; + + bpf_program__set_autoload(prog, 
true); + } + + err = test_helper_restricted__load(test); + ASSERT_ERR(err, "load_should_fail"); + + test_helper_restricted__destroy(test); + } while (++prog_i < prog_cnt); +} diff --git a/tools/testing/selftests/bpf/prog_tests/htab_update.c b/tools/testing/selftests/bpf/prog_tests/htab_update.c new file mode 100644 index 000000000..2bc85f481 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/htab_update.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022. Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include <sched.h> +#include <stdbool.h> +#include <test_progs.h> +#include "htab_update.skel.h" + +struct htab_update_ctx { + int fd; + int loop; + bool stop; +}; + +static void test_reenter_update(void) +{ + struct htab_update *skel; + unsigned int key, value; + int err; + + skel = htab_update__open(); + if (!ASSERT_OK_PTR(skel, "htab_update__open")) + return; + + /* lookup_elem_raw() may be inlined and find_kernel_btf_id() will return -ESRCH */ + bpf_program__set_autoload(skel->progs.lookup_elem_raw, true); + err = htab_update__load(skel); + if (!ASSERT_TRUE(!err || err == -ESRCH, "htab_update__load") || err) + goto out; + + skel->bss->pid = getpid(); + err = htab_update__attach(skel); + if (!ASSERT_OK(err, "htab_update__attach")) + goto out; + + /* Will trigger the reentrancy of bpf_map_update_elem() */ + key = 0; + value = 0; + err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, &value, 0); + if (!ASSERT_OK(err, "add element")) + goto out; + + ASSERT_EQ(skel->bss->update_err, -EBUSY, "no reentrancy"); +out: + htab_update__destroy(skel); +} + +static void *htab_update_thread(void *arg) +{ + struct htab_update_ctx *ctx = arg; + cpu_set_t cpus; + int i; + + /* Pinned on CPU 0 */ + CPU_ZERO(&cpus); + CPU_SET(0, &cpus); + pthread_setaffinity_np(pthread_self(), sizeof(cpus), &cpus); + + i = 0; + while (i++ < ctx->loop && !ctx->stop) { + unsigned int key = 0, value = 0; + int err; + + err = bpf_map_update_elem(ctx->fd, &key, &value, 0); + if (err) { + ctx->stop = true; + return (void *)(long)err; + } + } + + return NULL; +} + +static void test_concurrent_update(void) +{ + struct htab_update_ctx ctx; + struct htab_update *skel; + unsigned int i, nr; + pthread_t *tids; + int err; + + skel = htab_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "htab_update__open_and_load")) + return; + + ctx.fd = bpf_map__fd(skel->maps.htab); + ctx.loop = 1000; + ctx.stop = false; + + nr = 4; + tids = calloc(nr, sizeof(*tids)); + if (!ASSERT_NEQ(tids, NULL, "no mem")) + goto out; + + for (i = 0; i < nr; i++) { + err = pthread_create(&tids[i], NULL, htab_update_thread, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + unsigned int j; + + ctx.stop = true; + for (j = 0; j < i; j++) + pthread_join(tids[j], NULL); + goto out; + } + } + + for (i = 0; i < nr; i++) { + void *thread_err = NULL; + + pthread_join(tids[i], &thread_err); + ASSERT_EQ(thread_err, NULL, "update error"); + } + +out: + if (tids) + free(tids); + htab_update__destroy(skel); +} + +void test_htab_update(void) +{ + if (test__start_subtest("reenter_update")) + test_reenter_update(); + if (test__start_subtest("concurrent_update")) + test_concurrent_update(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c new file mode 100644 index 000000000..73579370b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include 
<network_helpers.h> +#include "kfree_skb.skel.h" + +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + +static union { + __u32 cb32[5]; + __u8 cb8[20]; +} cb = { + .cb32[0] = 0x81828384, +}; + +static void on_sample(void *ctx, int cpu, void *data, __u32 size) +{ + struct meta *meta = (struct meta *)data; + struct ipv6_packet *pkt_v6 = data + sizeof(*meta); + int duration = 0; + + if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n", + size, 72 + sizeof(*meta))) + return; + if (CHECK(meta->ifindex != 1, "check_meta_ifindex", + "meta->ifindex = %d\n", meta->ifindex)) + /* spurious kfree_skb not on loopback device */ + return; + if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n", + meta->cb8_0, cb.cb8[0])) + return; + if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0", + "cb32_0 %x != %x\n", + meta->cb32_0, cb.cb32[0])) + return; + if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth", + "h_proto %x\n", pkt_v6->eth.h_proto)) + return; + if (CHECK(pkt_v6->iph.nexthdr != 6, "check_ip", + "iph.nexthdr %x\n", pkt_v6->iph.nexthdr)) + return; + if (CHECK(pkt_v6->tcp.doff != 5, "check_tcp", + "tcp.doff %x\n", pkt_v6->tcp.doff)) + return; + + *(bool *)ctx = true; +} + +/* TODO: fix kernel panic caused by this test in parallel mode */ +void serial_test_kfree_skb(void) +{ + struct __sk_buff skb = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + struct kfree_skb *skel = NULL; + struct bpf_link *link; + struct bpf_object *obj; + struct perf_buffer *pb = NULL; + int err, prog_fd; + bool passed = false; + __u32 duration = 0; + const int zero = 0; + bool test_ok[2]; + + err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + + skel = kfree_skb__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kfree_skb_skel")) + goto close_prog; + + link = bpf_program__attach_raw_tracepoint(skel->progs.trace_kfree_skb, NULL); + if (!ASSERT_OK_PTR(link, "attach_raw_tp")) + goto close_prog; + skel->links.trace_kfree_skb = link; + + link = bpf_program__attach_trace(skel->progs.fentry_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fentry")) + goto close_prog; + skel->links.fentry_eth_type_trans = link; + + link = bpf_program__attach_trace(skel->progs.fexit_eth_type_trans); + if (!ASSERT_OK_PTR(link, "attach fexit")) + goto close_prog; + skel->links.fexit_eth_type_trans = link; + + /* set up perf buffer */ + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, + on_sample, NULL, &passed, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buf__new")) + goto close_prog; + + memcpy(skb.cb, &cb, sizeof(cb)); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_OK(topts.retval, "ipv6 test_run retval"); + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) + goto close_prog; + + /* make sure kfree_skb program was triggered + * and it sent expected skb into ring buffer + */ + ASSERT_TRUE(passed, "passed"); + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.bss), &zero, test_ok); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + CHECK_FAIL(!test_ok[0] || !test_ok[1]); +close_prog: + perf_buffer__free(pb); + bpf_object__close(obj); + kfree_skb__destroy(skel); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c new file mode 100644 index 000000000..36071f3f1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <network_helpers.h> +#include "kfunc_call_fail.skel.h" +#include "kfunc_call_test.skel.h" +#include "kfunc_call_test.lskel.h" +#include "kfunc_call_test_subprog.skel.h" +#include "kfunc_call_test_subprog.lskel.h" +#include "kfunc_call_destructive.skel.h" + +#include "cap_helpers.h" + +static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +enum kfunc_test_type { + tc_test = 0, + syscall_test, + syscall_null_ctx_test, +}; + +struct kfunc_test_params { + const char *prog_name; + unsigned long lskel_prog_desc_offset; + int retval; + enum kfunc_test_type test_type; + const char *expected_err_msg; +}; + +#define __BPF_TEST_SUCCESS(name, __retval, type) \ + { \ + .prog_name = #name, \ + .lskel_prog_desc_offset = offsetof(struct kfunc_call_test_lskel, progs.name), \ + .retval = __retval, \ + .test_type = type, \ + .expected_err_msg = NULL, \ + } + +#define __BPF_TEST_FAIL(name, __retval, type, error_msg) \ + { \ + .prog_name = #name, \ + .lskel_prog_desc_offset = 0 /* unused when test is failing */, \ + .retval = __retval, \ + .test_type = type, \ + .expected_err_msg = error_msg, \ + } + +#define TC_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, tc_test) +#define SYSCALL_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_test) +#define SYSCALL_NULL_CTX_TEST(name, retval) __BPF_TEST_SUCCESS(name, retval, syscall_null_ctx_test) + +#define TC_FAIL(name, retval, error_msg) __BPF_TEST_FAIL(name, retval, tc_test, error_msg) +#define SYSCALL_NULL_CTX_FAIL(name, retval, error_msg) \ + __BPF_TEST_FAIL(name, retval, syscall_null_ctx_test, error_msg) + +static struct kfunc_test_params kfunc_tests[] = { + /* failure cases: + * if retval is 0 -> the program will fail to load and the error message is an error + * if retval is not 0 -> the program can be loaded but running it will gives the + * provided return value. 
The error message is thus the one + * from a successful load + */ + SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_fail, -EINVAL, "processed 4 insns"), + SYSCALL_NULL_CTX_FAIL(kfunc_syscall_test_null_fail, -EINVAL, "processed 4 insns"), + TC_FAIL(kfunc_call_test_get_mem_fail_rdonly, 0, "R0 cannot write into rdonly_mem"), + TC_FAIL(kfunc_call_test_get_mem_fail_use_after_free, 0, "invalid mem access 'scalar'"), + TC_FAIL(kfunc_call_test_get_mem_fail_oob, 0, "min value is outside of the allowed memory range"), + TC_FAIL(kfunc_call_test_get_mem_fail_not_const, 0, "is not a const"), + TC_FAIL(kfunc_call_test_mem_acquire_fail, 0, "acquire kernel function does not return PTR_TO_BTF_ID"), + + /* success cases */ + TC_TEST(kfunc_call_test1, 12), + TC_TEST(kfunc_call_test2, 3), + TC_TEST(kfunc_call_test_ref_btf_id, 0), + TC_TEST(kfunc_call_test_get_mem, 42), + SYSCALL_TEST(kfunc_syscall_test, 0), + SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0), +}; + +struct syscall_test_args { + __u8 data[16]; + size_t size; +}; + +static void verify_success(struct kfunc_test_params *param) +{ + struct kfunc_call_test_lskel *lskel = NULL; + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct bpf_prog_desc *lskel_prog; + struct kfunc_call_test *skel; + struct bpf_program *prog; + int prog_fd, err; + struct syscall_test_args args = { + .size = 10, + }; + + switch (param->test_type) { + case syscall_test: + topts.ctx_in = &args; + topts.ctx_size_in = sizeof(args); + /* fallthrough */ + case syscall_null_ctx_test: + break; + case tc_test: + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); + topts.repeat = 1; + break; + } + + /* first test with normal libbpf */ + skel = kfunc_call_test__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, param->prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + prog_fd = bpf_program__fd(prog); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, param->prog_name)) + goto cleanup; + + if (!ASSERT_EQ(topts.retval, param->retval, "retval")) + goto cleanup; + + /* second test with light skeletons */ + lskel = kfunc_call_test_lskel__open_and_load(); + if (!ASSERT_OK_PTR(lskel, "lskel")) + goto cleanup; + + lskel_prog = (struct bpf_prog_desc *)((char *)lskel + param->lskel_prog_desc_offset); + + prog_fd = lskel_prog->prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, param->prog_name)) + goto cleanup; + + ASSERT_EQ(topts.retval, param->retval, "retval"); + +cleanup: + kfunc_call_test__destroy(skel); + if (lskel) + kfunc_call_test_lskel__destroy(lskel); +} + +static void verify_fail(struct kfunc_test_params *param) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct bpf_program *prog; + struct kfunc_call_fail *skel; + int prog_fd, err; + struct syscall_test_args args = { + .size = 10, + }; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + switch (param->test_type) { + case syscall_test: + topts.ctx_in = &args; + topts.ctx_size_in = sizeof(args); + /* fallthrough */ + case syscall_null_ctx_test: + break; + case tc_test: + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); + topts.repeat = 1; + break; + } + + skel = kfunc_call_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "kfunc_call_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, param->prog_name); + if 
(!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + err = kfunc_call_fail__load(skel); + if (!param->retval) { + /* the verifier is supposed to complain and refuses to load */ + if (!ASSERT_ERR(err, "unexpected load success")) + goto out_err; + + } else { + /* the program is loaded but must dynamically fail */ + if (!ASSERT_OK(err, "unexpected load error")) + goto out_err; + + prog_fd = bpf_program__fd(prog); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_EQ(err, param->retval, param->prog_name)) + goto out_err; + } + +out_err: + if (!ASSERT_OK_PTR(strstr(obj_log_buf, param->expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", param->expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + kfunc_call_fail__destroy(skel); +} + +static void test_main(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kfunc_tests); i++) { + if (!test__start_subtest(kfunc_tests[i].prog_name)) + continue; + + if (!kfunc_tests[i].expected_err_msg) + verify_success(&kfunc_tests[i]); + else + verify_fail(&kfunc_tests[i]); + } +} + +static void test_subprog(void) +{ + struct kfunc_call_test_subprog *skel; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = kfunc_call_test_subprog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "bpf_prog_test_run(test1)"); + ASSERT_EQ(topts.retval, 10, "test1-retval"); + ASSERT_NEQ(skel->data->active_res, -1, "active_res"); + ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); + + kfunc_call_test_subprog__destroy(skel); +} + +static void test_subprog_lskel(void) +{ + struct kfunc_call_test_subprog_lskel *skel; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = kfunc_call_test_subprog_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + prog_fd = skel->progs.kfunc_call_test1.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "bpf_prog_test_run(test1)"); + ASSERT_EQ(topts.retval, 10, "test1-retval"); + ASSERT_NEQ(skel->data->active_res, -1, "active_res"); + ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); + + kfunc_call_test_subprog_lskel__destroy(skel); +} + +static int test_destructive_open_and_load(void) +{ + struct kfunc_call_destructive *skel; + int err; + + skel = kfunc_call_destructive__open(); + if (!ASSERT_OK_PTR(skel, "prog_open")) + return -1; + + err = kfunc_call_destructive__load(skel); + + kfunc_call_destructive__destroy(skel); + + return err; +} + +static void test_destructive(void) +{ + __u64 save_caps = 0; + + ASSERT_OK(test_destructive_open_and_load(), "successful_load"); + + if (!ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "drop_caps")) + return; + + ASSERT_EQ(test_destructive_open_and_load(), -13, "no_caps_failure"); + + cap_enable_effective(save_caps, NULL); +} + +void test_kfunc_call(void) +{ + test_main(); + + if (test__start_subtest("subprog")) + test_subprog(); + + if (test__start_subtest("subprog_lskel")) + test_subprog_lskel(); + + if (test__start_subtest("destructive")) + test_destructive(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c 
b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c new file mode 100644 index 000000000..c210657d4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (c) 2022 Facebook + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <test_progs.h> +#include "test_kfunc_dynptr_param.skel.h" + +static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct { + const char *prog_name; + const char *expected_verifier_err_msg; + int expected_runtime_err; +} kfunc_dynptr_tests[] = { + {"dynptr_type_not_supp", + "arg#0 pointer type STRUCT bpf_dynptr_kern points to unsupported dynamic pointer type", 0}, + {"not_valid_dynptr", + "arg#0 pointer type STRUCT bpf_dynptr_kern must be valid and initialized", 0}, + {"not_ptr_to_stack", "arg#0 pointer type STRUCT bpf_dynptr_kern not to stack", 0}, + {"dynptr_data_null", NULL, -EBADMSG}, +}; + +static bool kfunc_not_supported; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, + va_list args) +{ + if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n")) + return 0; + + if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature")) + return 0; + + kfunc_not_supported = true; + return 0; +} + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + struct test_kfunc_dynptr_param *skel; + LIBBPF_OPTS(bpf_object_open_opts, opts); + libbpf_print_fn_t old_print_cb; + struct bpf_program *prog; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = test_kfunc_dynptr_param__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + kfunc_not_supported = false; + + old_print_cb = libbpf_set_print(libbpf_print_cb); + err = test_kfunc_dynptr_param__load(skel); + libbpf_set_print(old_print_cb); + + if (err < 0 && kfunc_not_supported) { + fprintf(stderr, + "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n", + __func__); + test__skip(); + goto cleanup; + } + + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + test_kfunc_dynptr_param__destroy(skel); +} + +static void verify_success(const char *prog_name, int expected_runtime_err) +{ + struct test_kfunc_dynptr_param *skel; + libbpf_print_fn_t old_print_cb; + struct bpf_program *prog; + struct bpf_link *link; + __u32 next_id; + int err; + + skel = test_kfunc_dynptr_param__open(); + if (!ASSERT_OK_PTR(skel, "test_kfunc_dynptr_param__open")) + return; + + skel->bss->pid = getpid(); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + kfunc_not_supported = false; + + old_print_cb = libbpf_set_print(libbpf_print_cb); + err = test_kfunc_dynptr_param__load(skel); + libbpf_set_print(old_print_cb); + + if (err < 0 && kfunc_not_supported) { + fprintf(stderr, + "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not 
supported\n", + __func__); + test__skip(); + goto cleanup; + } + + if (!ASSERT_OK(err, "test_kfunc_dynptr_param__load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + err = bpf_prog_get_next_id(0, &next_id); + + bpf_link__destroy(link); + + if (!ASSERT_OK(err, "bpf_prog_get_next_id")) + goto cleanup; + + ASSERT_EQ(skel->bss->err, expected_runtime_err, "err"); + +cleanup: + test_kfunc_dynptr_param__destroy(skel); +} + +void test_kfunc_dynptr_param(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kfunc_dynptr_tests); i++) { + if (!test__start_subtest(kfunc_dynptr_tests[i].prog_name)) + continue; + + if (kfunc_dynptr_tests[i].expected_verifier_err_msg) + verify_fail(kfunc_dynptr_tests[i].prog_name, + kfunc_dynptr_tests[i].expected_verifier_err_msg); + else + verify_success(kfunc_dynptr_tests[i].prog_name, + kfunc_dynptr_tests[i].expected_runtime_err); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c new file mode 100644 index 000000000..0d82e28ae --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "kprobe_multi.skel.h" +#include "trace_helpers.h" +#include "kprobe_multi_empty.skel.h" +#include "bpf/libbpf_internal.h" +#include "bpf/hashmap.h" + +static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + if (test_return) { + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); + } +} + +static void test_skel_api(void) +{ + struct kprobe_multi *skel = NULL; + int err; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + err = kprobe_multi__attach(skel); + if (!ASSERT_OK(err, "kprobe_multi__attach")) + goto cleanup; + + kprobe_multi_test_run(skel, true); + +cleanup: + kprobe_multi__destroy(skel); +} + +static 
void test_link_api(struct bpf_link_create_opts *opts) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link1_fd, 0, "link_fd")) + goto cleanup; + + opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link2_fd, 0, "link_fd")) + goto cleanup; + + kprobe_multi_test_run(skel, true); + +cleanup: + if (link1_fd != -1) + close(link1_fd); + if (link2_fd != -1) + close(link2_fd); + kprobe_multi__destroy(skel); +} + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \ + return; \ +}) + +static void test_link_api_addrs(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + opts.kprobe_multi.addrs = (const unsigned long*) addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + test_link_api(&opts); +} + +static void test_link_api_syms(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.kprobe_multi.syms = syms; + opts.kprobe_multi.cnt = ARRAY_SIZE(syms); + test_link_api(&opts); +} + +static void +test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + pattern, opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + if (opts) { + opts->retprobe = true; + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual, + pattern, opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + } + + kprobe_multi_test_run(skel, !!opts); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + + test_attach_api("bpf_fentry_test*", &opts); + test_attach_api("bpf_fentry_test?", NULL); +} + +static void test_attach_api_addrs(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + 
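+	/* The resolved kallsyms addresses are handed to libbpf via
+	 * opts.addrs (opts.syms is left unset), so the kprobes are
+	 * attached by address rather than by symbol name or pattern.
+	 */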
opts.addrs = (const unsigned long *) addrs; + opts.cnt = ARRAY_SIZE(addrs); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_fails(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi *skel = NULL; + struct bpf_link *link = NULL; + unsigned long long addrs[2]; + const char *syms[2] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + }; + __u64 cookies[2]; + + addrs[0] = ksym_get_addr("bpf_fentry_test1"); + addrs[1] = ksym_get_addr("bpf_fentry_test2"); + + if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr")) + goto cleanup; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + /* fail_1 - pattern and opts NULL */ + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + NULL, NULL); + if (!ASSERT_ERR_PTR(link, "fail_1")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error")) + goto cleanup; + + /* fail_2 - both addrs and syms set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + NULL, &opts); + if (!ASSERT_ERR_PTR(link, "fail_2")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error")) + goto cleanup; + + /* fail_3 - pattern and addrs set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_3")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error")) + goto cleanup; + + /* fail_4 - pattern and cnt set */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_4")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error")) + goto cleanup; + + /* fail_5 - pattern and cookies */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = 0; + opts.cookies = cookies; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_5")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error")) + goto cleanup; + +cleanup: + bpf_link__destroy(link); + kprobe_multi__destroy(skel); +} + +static inline __u64 get_time_ns(void) +{ + struct timespec t; + + clock_gettime(CLOCK_MONOTONIC, &t); + return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; +} + +static size_t symbol_hash(const void *key, void *ctx __maybe_unused) +{ + return str_hash((const char *) key); +} + +static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused) +{ + return strcmp((const char *) key1, (const char *) key2) == 0; +} + +static int get_syms(char ***symsp, size_t *cntp) +{ + size_t cap = 0, cnt = 0, i; + char *name = NULL, **syms = NULL; + struct 
hashmap *map; + char buf[256]; + FILE *f; + int err = 0; + + /* + * The available_filter_functions contains many duplicates, + * but other than that all symbols are usable in kprobe multi + * interface. + * Filtering out duplicates by using hashmap__add, which won't + * add existing entry. + */ + f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); + if (!f) + return -EINVAL; + + map = hashmap__new(symbol_hash, symbol_equal, NULL); + if (IS_ERR(map)) { + err = libbpf_get_error(map); + goto error; + } + + while (fgets(buf, sizeof(buf), f)) { + /* skip modules */ + if (strchr(buf, '[')) + continue; + + free(name); + if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1) + continue; + /* + * We attach to almost all kernel functions and some of them + * will cause 'suspicious RCU usage' when fprobe is attached + * to them. Filter out the current culprits - arch_cpu_idle + * default_idle and rcu_* functions. + */ + if (!strcmp(name, "arch_cpu_idle")) + continue; + if (!strcmp(name, "default_idle")) + continue; + if (!strncmp(name, "rcu_", 4)) + continue; + if (!strcmp(name, "bpf_dispatcher_xdp_func")) + continue; + if (!strncmp(name, "__ftrace_invalid_address__", + sizeof("__ftrace_invalid_address__") - 1)) + continue; + + err = hashmap__add(map, name, NULL); + if (err == -EEXIST) + continue; + if (err) + goto error; + + err = libbpf_ensure_mem((void **) &syms, &cap, + sizeof(*syms), cnt + 1); + if (err) + goto error; + + syms[cnt++] = name; + name = NULL; + } + + *symsp = syms; + *cntp = cnt; + +error: + free(name); + fclose(f); + hashmap__free(map); + if (err) { + for (i = 0; i < cnt; i++) + free(syms[i]); + free(syms); + } + return err; +} + +void serial_test_kprobe_multi_bench_attach(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi_empty *skel = NULL; + long attach_start_ns, attach_end_ns; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + struct bpf_link *link = NULL; + char **syms = NULL; + size_t cnt = 0, i; + + if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms")) + return; + + skel = kprobe_multi_empty__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load")) + goto cleanup; + + opts.syms = (const char **) syms; + opts.cnt = cnt; + + attach_start_ns = get_time_ns(); + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty, + NULL, &opts); + attach_end_ns = get_time_ns(); + + if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + detach_start_ns = get_time_ns(); + bpf_link__destroy(link); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: found %lu functions\n", __func__, cnt); + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); + +cleanup: + kprobe_multi_empty__destroy(skel); + if (syms) { + for (i = 0; i < cnt; i++) + free(syms[i]); + free(syms); + } +} + +void test_kprobe_multi_test(void) +{ + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + if (test__start_subtest("skel_api")) + test_skel_api(); + if (test__start_subtest("link_api_addrs")) + test_link_api_syms(); + if (test__start_subtest("link_api_syms")) + test_link_api_addrs(); + if (test__start_subtest("attach_api_pattern")) + test_attach_api_pattern(); + if (test__start_subtest("attach_api_addrs")) + test_attach_api_addrs(); + if (test__start_subtest("attach_api_syms")) + 
test_attach_api_syms(); + if (test__start_subtest("attach_api_fails")) + test_attach_api_fails(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c new file mode 100644 index 000000000..b295969b2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> +#include "test_ksyms.skel.h" +#include <sys/stat.h> + +static int duration; + +void test_ksyms(void) +{ + const char *btf_path = "/sys/kernel/btf/vmlinux"; + struct test_ksyms *skel; + struct test_ksyms__data *data; + __u64 link_fops_addr, per_cpu_start_addr; + struct stat st; + __u64 btf_size; + int err; + + err = kallsyms_find("bpf_link_fops", &link_fops_addr); + if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno)) + return; + if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n")) + return; + + err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr); + if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno)) + return; + if (CHECK(err == -ENOENT, "ksym_find", "symbol 'per_cpu_start' not found\n")) + return; + + if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno)) + return; + btf_size = st.st_size; + + skel = test_ksyms__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n")) + return; + + err = test_ksyms__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops", + "got 0x%llx, exp 0x%llx\n", + data->out__bpf_link_fops, link_fops_addr); + CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1", + "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0); + CHECK(data->out__btf_size != btf_size, "btf_size", + "got %llu, exp %llu\n", data->out__btf_size, btf_size); + CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start", + "got %llu, exp %llu\n", data->out__per_cpu_start, + per_cpu_start_addr); + +cleanup: + test_ksyms__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c new file mode 100644 index 000000000..1d7a2f1e0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Google */ + +#include <test_progs.h> +#include <bpf/libbpf.h> +#include <bpf/btf.h> +#include "test_ksyms_btf.skel.h" +#include "test_ksyms_btf_null_check.skel.h" +#include "test_ksyms_weak.skel.h" +#include "test_ksyms_weak.lskel.h" +#include "test_ksyms_btf_write_check.skel.h" + +static int duration; + +static void test_basic(void) +{ + __u64 runqueues_addr, bpf_prog_active_addr; + __u32 this_rq_cpu; + int this_bpf_prog_active; + struct test_ksyms_btf *skel = NULL; + struct test_ksyms_btf__data *data; + int err; + + err = kallsyms_find("runqueues", &runqueues_addr); + if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno)) + return; + if (CHECK(err == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n")) + return; + + err = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr); + if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno)) + return; + if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n")) + return; + + skel = 
test_ksyms_btf__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n")) + goto cleanup; + + err = test_ksyms_btf__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr", + "got %llu, exp %llu\n", + (unsigned long long)data->out__runqueues_addr, + (unsigned long long)runqueues_addr); + CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr", + "got %llu, exp %llu\n", + (unsigned long long)data->out__bpf_prog_active_addr, + (unsigned long long)bpf_prog_active_addr); + + CHECK(data->out__rq_cpu == -1, "rq_cpu", + "got %u, exp != -1\n", data->out__rq_cpu); + CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active", + "got %d, exp >= 0\n", data->out__bpf_prog_active); + CHECK(data->out__cpu_0_rq_cpu != 0, "cpu_rq(0)->cpu", + "got %u, exp 0\n", data->out__cpu_0_rq_cpu); + + this_rq_cpu = data->out__this_rq_cpu; + CHECK(this_rq_cpu != data->out__rq_cpu, "this_rq_cpu", + "got %u, exp %u\n", this_rq_cpu, data->out__rq_cpu); + + this_bpf_prog_active = data->out__this_bpf_prog_active; + CHECK(this_bpf_prog_active != data->out__bpf_prog_active, "this_bpf_prog_active", + "got %d, exp %d\n", this_bpf_prog_active, + data->out__bpf_prog_active); + +cleanup: + test_ksyms_btf__destroy(skel); +} + +static void test_null_check(void) +{ + struct test_ksyms_btf_null_check *skel; + + skel = test_ksyms_btf_null_check__open_and_load(); + CHECK(skel, "skel_open", "unexpected load of a prog missing null check\n"); + + test_ksyms_btf_null_check__destroy(skel); +} + +static void test_weak_syms(void) +{ + struct test_ksyms_weak *skel; + struct test_ksyms_weak__data *data; + int err; + + skel = test_ksyms_weak__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load")) + return; + + err = test_ksyms_weak__attach(skel); + if (!ASSERT_OK(err, "test_ksyms_weak__attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym"); + ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym"); + ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym"); + ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym"); + +cleanup: + test_ksyms_weak__destroy(skel); +} + +static void test_weak_syms_lskel(void) +{ + struct test_ksyms_weak_lskel *skel; + struct test_ksyms_weak_lskel__data *data; + int err; + + skel = test_ksyms_weak_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load")) + return; + + err = test_ksyms_weak_lskel__attach(skel); + if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym"); + ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym"); + ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym"); + ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym"); + +cleanup: + test_ksyms_weak_lskel__destroy(skel); +} + +static void test_write_check(bool test_handler1) +{ + struct test_ksyms_btf_write_check *skel; + + skel = test_ksyms_btf_write_check__open(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open")) + return; + bpf_program__set_autoload(test_handler1 ? 
skel->progs.handler2 : skel->progs.handler1, false); + ASSERT_ERR(test_ksyms_btf_write_check__load(skel), + "unexpected load of a prog writing to ksym memory\n"); + + test_ksyms_btf_write_check__destroy(skel); +} + +void test_ksyms_btf(void) +{ + int percpu_datasec; + struct btf *btf; + + btf = libbpf_find_kernel_btf(); + if (!ASSERT_OK_PTR(btf, "btf_exists")) + return; + + percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu", + BTF_KIND_DATASEC); + btf__free(btf); + if (percpu_datasec < 0) { + printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n", + __func__); + test__skip(); + return; + } + + if (test__start_subtest("basic")) + test_basic(); + + if (test__start_subtest("null_check")) + test_null_check(); + + if (test__start_subtest("weak_ksyms")) + test_weak_syms(); + + if (test__start_subtest("weak_ksyms_lskel")) + test_weak_syms_lskel(); + + if (test__start_subtest("write_check1")) + test_write_check(true); + + if (test__start_subtest("write_check2")) + test_write_check(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c new file mode 100644 index 000000000..a1ebac70e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <network_helpers.h> +#include "test_ksyms_module.lskel.h" +#include "test_ksyms_module.skel.h" + +static void test_ksyms_module_lskel(void) +{ + struct test_ksyms_module_lskel *skel; + int err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + if (!env.has_testmod) { + test__skip(); + return; + } + + skel = test_ksyms_module_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load")) + return; + err = bpf_prog_test_run_opts(skel->progs.load.prog_fd, &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + ASSERT_EQ(topts.retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); +cleanup: + test_ksyms_module_lskel__destroy(skel); +} + +static void test_ksyms_module_libbpf(void) +{ + struct test_ksyms_module *skel; + int err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + if (!env.has_testmod) { + test__skip(); + return; + } + + skel = test_ksyms_module__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) + return; + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load), &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto cleanup; + ASSERT_EQ(topts.retval, 0, "retval"); + ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); +cleanup: + test_ksyms_module__destroy(skel); +} + +void test_ksyms_module(void) +{ + if (test__start_subtest("lskel")) + test_ksyms_module_lskel(); + if (test__start_subtest("libbpf")) + test_ksyms_module_libbpf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c new file mode 100644 index 000000000..9c1a18573 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_l4lb(const char *file) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = 
{.vip_num = VIP_NUM}; + __u32 stats_key = VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + int err, i, prog_fd, map_fd; + __u64 bytes = 0, pkts = 0; + struct bpf_object *obj; + char buf[128]; + u32 *magic = (u32 *)buf; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = NUM_ITER, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + map_fd = bpf_find_map(__func__, obj, "vip_map"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key, &value, 0); + + map_fd = bpf_find_map(__func__, obj, "ch_rings"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); + + map_fd = bpf_find_map(__func__, obj, "reals"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &real_num, &real_def, 0); + + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv4 test_run retval"); + ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 magic"); + + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); /* reset out size */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv6 test_run retval"); + ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 magic"); + + map_fd = bpf_find_map(__func__, obj, "stats"); + if (map_fd < 0) + goto out; + bpf_map_lookup_elem(map_fd, &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 || + pkts != NUM_ITER * 2)) + printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); +out: + bpf_object__close(obj); +} + +void test_l4lb_all(void) +{ + if (test__start_subtest("l4lb_inline")) + test_l4lb("test_l4lb.bpf.o"); + if (test__start_subtest("l4lb_noinline")) + test_l4lb("test_l4lb_noinline.bpf.o"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/legacy_printk.c b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c new file mode 100644 index 000000000..ec6e45f2a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/legacy_printk.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "test_legacy_printk.skel.h" + +static int execute_one_variant(bool legacy) +{ + struct test_legacy_printk *skel; + int err, zero = 0, my_pid = getpid(), res, map_fd; + + skel = test_legacy_printk__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return -errno; + + bpf_program__set_autoload(skel->progs.handle_legacy, legacy); + bpf_program__set_autoload(skel->progs.handle_modern, !legacy); + + err = test_legacy_printk__load(skel); + /* no ASSERT_OK, we expect one of two variants can fail here */ + if (err) + goto err_out; + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.my_pid_map); + err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY); + if (!ASSERT_OK(err, "my_pid_map_update")) + goto err_out; + err = bpf_map_lookup_elem(map_fd, &zero, &res); + } else { + skel->bss->my_pid_var = my_pid; + } 
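+ /* Past this point both variants behave identically: attach the skeleton,
+  * trigger the tracepoint with usleep(), then read the result back from
+  * res_map (legacy) or the res_var global (modern).
+  */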
+ + err = test_legacy_printk__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto err_out; + + usleep(1); /* trigger */ + + if (legacy) { + map_fd = bpf_map__fd(skel->maps.res_map); + err = bpf_map_lookup_elem(map_fd, &zero, &res); + if (!ASSERT_OK(err, "res_map_lookup")) + goto err_out; + } else { + res = skel->bss->res_var; + } + + if (!ASSERT_GT(res, 0, "res")) { + err = -EINVAL; + goto err_out; + } + +err_out: + test_legacy_printk__destroy(skel); + return err; +} + +void test_legacy_printk(void) +{ + /* legacy variant should work everywhere */ + ASSERT_OK(execute_one_variant(true /* legacy */), "legacy_case"); + + /* execute modern variant, can fail the load on old kernels */ + execute_one_variant(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c new file mode 100644 index 000000000..9f766ddd9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <bpf/btf.h> + +void test_libbpf_probe_prog_types(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_prog_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_prog_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + if (!ASSERT_OK_PTR(t, "bpf_prog_type_enum")) + goto cleanup; + + for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) { + const char *prog_type_name = btf__str_by_offset(btf, e->name_off); + enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val; + int res; + + if (prog_type == BPF_PROG_TYPE_UNSPEC) + continue; + + if (!test__start_subtest(prog_type_name)) + continue; + + res = libbpf_probe_bpf_prog_type(prog_type, NULL); + ASSERT_EQ(res, 1, prog_type_name); + } + +cleanup: + btf__free(btf); +} + +void test_libbpf_probe_map_types(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_map_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_map_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + if (!ASSERT_OK_PTR(t, "bpf_map_type_enum")) + goto cleanup; + + for (e = btf_enum(t), i = 0, n = btf_vlen(t); i < n; e++, i++) { + const char *map_type_name = btf__str_by_offset(btf, e->name_off); + enum bpf_map_type map_type = (enum bpf_map_type)e->val; + int res; + + if (map_type == BPF_MAP_TYPE_UNSPEC) + continue; + + if (!test__start_subtest(map_type_name)) + continue; + + res = libbpf_probe_bpf_map_type(map_type, NULL); + ASSERT_EQ(res, 1, map_type_name); + } + +cleanup: + btf__free(btf); +} + +void test_libbpf_probe_helpers(void) +{ +#define CASE(prog, helper, supp) { \ + .prog_type_name = "BPF_PROG_TYPE_" # prog, \ + .helper_name = "bpf_" # helper, \ + .prog_type = BPF_PROG_TYPE_ ## prog, \ + .helper_id = BPF_FUNC_ ## helper, \ + .supported = supp, \ +} + const struct case_def { + const char *prog_type_name; + const char *helper_name; + enum bpf_prog_type prog_type; + enum bpf_func_id helper_id; + bool supported; + } cases[] = { + CASE(KPROBE, unspec, false), + CASE(KPROBE, 
map_lookup_elem, true), + CASE(KPROBE, loop, true), + + CASE(KPROBE, ktime_get_coarse_ns, false), + CASE(SOCKET_FILTER, ktime_get_coarse_ns, true), + + CASE(KPROBE, sys_bpf, false), + CASE(SYSCALL, sys_bpf, true), + }; + size_t case_cnt = ARRAY_SIZE(cases), i; + char buf[128]; + + for (i = 0; i < case_cnt; i++) { + const struct case_def *d = &cases[i]; + int res; + + snprintf(buf, sizeof(buf), "%s+%s", d->prog_type_name, d->helper_name); + + if (!test__start_subtest(buf)) + continue; + + res = libbpf_probe_bpf_helper(d->prog_type, d->helper_id, NULL); + ASSERT_EQ(res, d->supported, buf); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c new file mode 100644 index 000000000..93e9cddaa --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#include <ctype.h> +#include <test_progs.h> +#include <bpf/btf.h> + +/* + * Utility function uppercasing an entire string. + */ +static void uppercase(char *s) +{ + for (; *s != '\0'; s++) + *s = toupper(*s); +} + +/* + * Test case to check that all bpf_attach_type variants are covered by + * libbpf_bpf_attach_type_str. + */ +static void test_libbpf_bpf_attach_type_str(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_attach_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_attach_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_attach_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + e = btf_enum(t); + n = btf_vlen(t); + for (i = 0; i < n; e++, i++) { + enum bpf_attach_type attach_type = (enum bpf_attach_type)e->val; + const char *attach_type_name; + const char *attach_type_str; + char buf[256]; + + if (attach_type == __MAX_BPF_ATTACH_TYPE) + continue; + + attach_type_name = btf__str_by_offset(btf, e->name_off); + attach_type_str = libbpf_bpf_attach_type_str(attach_type); + ASSERT_OK_PTR(attach_type_str, attach_type_name); + + snprintf(buf, sizeof(buf), "BPF_%s", attach_type_str); + uppercase(buf); + + ASSERT_STREQ(buf, attach_type_name, "exp_str_value"); + } + +cleanup: + btf__free(btf); +} + +/* + * Test case to check that all bpf_link_type variants are covered by + * libbpf_bpf_link_type_str. 
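+ * For each value found in vmlinux BTF, the string reported by libbpf is
+ * prefixed with BPF_LINK_TYPE_, uppercased, and compared against the
+ * kernel's enumerator name.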
+ */ +static void test_libbpf_bpf_link_type_str(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_link_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_link_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_link_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + e = btf_enum(t); + n = btf_vlen(t); + for (i = 0; i < n; e++, i++) { + enum bpf_link_type link_type = (enum bpf_link_type)e->val; + const char *link_type_name; + const char *link_type_str; + char buf[256]; + + if (link_type == MAX_BPF_LINK_TYPE) + continue; + + link_type_name = btf__str_by_offset(btf, e->name_off); + link_type_str = libbpf_bpf_link_type_str(link_type); + ASSERT_OK_PTR(link_type_str, link_type_name); + + snprintf(buf, sizeof(buf), "BPF_LINK_TYPE_%s", link_type_str); + uppercase(buf); + + ASSERT_STREQ(buf, link_type_name, "exp_str_value"); + } + +cleanup: + btf__free(btf); +} + +/* + * Test case to check that all bpf_map_type variants are covered by + * libbpf_bpf_map_type_str. + */ +static void test_libbpf_bpf_map_type_str(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_map_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_map_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + e = btf_enum(t); + n = btf_vlen(t); + for (i = 0; i < n; e++, i++) { + enum bpf_map_type map_type = (enum bpf_map_type)e->val; + const char *map_type_name; + const char *map_type_str; + char buf[256]; + + map_type_name = btf__str_by_offset(btf, e->name_off); + map_type_str = libbpf_bpf_map_type_str(map_type); + ASSERT_OK_PTR(map_type_str, map_type_name); + + snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str); + uppercase(buf); + + ASSERT_STREQ(buf, map_type_name, "exp_str_value"); + } + +cleanup: + btf__free(btf); +} + +/* + * Test case to check that all bpf_prog_type variants are covered by + * libbpf_bpf_prog_type_str. + */ +static void test_libbpf_bpf_prog_type_str(void) +{ + struct btf *btf; + const struct btf_type *t; + const struct btf_enum *e; + int i, n, id; + + btf = btf__parse("/sys/kernel/btf/vmlinux", NULL); + if (!ASSERT_OK_PTR(btf, "btf_parse")) + return; + + /* find enum bpf_prog_type and enumerate each value */ + id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM); + if (!ASSERT_GT(id, 0, "bpf_prog_type_id")) + goto cleanup; + t = btf__type_by_id(btf, id); + e = btf_enum(t); + n = btf_vlen(t); + for (i = 0; i < n; e++, i++) { + enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val; + const char *prog_type_name; + const char *prog_type_str; + char buf[256]; + + prog_type_name = btf__str_by_offset(btf, e->name_off); + prog_type_str = libbpf_bpf_prog_type_str(prog_type); + ASSERT_OK_PTR(prog_type_str, prog_type_name); + + snprintf(buf, sizeof(buf), "BPF_PROG_TYPE_%s", prog_type_str); + uppercase(buf); + + ASSERT_STREQ(buf, prog_type_name, "exp_str_value"); + } + +cleanup: + btf__free(btf); +} + +/* + * Run all libbpf str conversion tests. 
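+ * Each subtest below exercises one enum family: attach types, link types,
+ * map types and program types.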
+ */ +void test_libbpf_str(void) +{ + if (test__start_subtest("bpf_attach_type_str")) + test_libbpf_bpf_attach_type_str(); + + if (test__start_subtest("bpf_link_type_str")) + test_libbpf_bpf_link_type_str(); + + if (test__start_subtest("bpf_map_type_str")) + test_libbpf_bpf_map_type_str(); + + if (test__start_subtest("bpf_prog_type_str")) + test_libbpf_bpf_prog_type_str(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c new file mode 100644 index 000000000..6fc97c45f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <sys/stat.h> + +#include "test_link_pinning.skel.h" + +static int duration = 0; + +void test_link_pinning_subtest(struct bpf_program *prog, + struct test_link_pinning__bss *bss) +{ + const char *link_pin_path = "/sys/fs/bpf/pinned_link_test"; + struct stat statbuf = {}; + struct bpf_link *link; + int err, i; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + bss->in = 1; + usleep(1); + CHECK(bss->out != 1, "res_check1", "exp %d, got %d\n", 1, bss->out); + + /* pin link */ + err = bpf_link__pin(link, link_pin_path); + if (CHECK(err, "link_pin", "err: %d\n", err)) + goto cleanup; + + CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path1", + "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link)); + + /* check that link was pinned */ + err = stat(link_pin_path, &statbuf); + if (CHECK(err, "stat_link", "err %d errno %d\n", err, errno)) + goto cleanup; + + bss->in = 2; + usleep(1); + CHECK(bss->out != 2, "res_check2", "exp %d, got %d\n", 2, bss->out); + + /* destroy link, pinned link should keep program attached */ + bpf_link__destroy(link); + link = NULL; + + bss->in = 3; + usleep(1); + CHECK(bss->out != 3, "res_check3", "exp %d, got %d\n", 3, bss->out); + + /* re-open link from BPFFS */ + link = bpf_link__open(link_pin_path); + if (!ASSERT_OK_PTR(link, "link_open")) + goto cleanup; + + CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2", + "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link)); + + /* unpin link from BPFFS, program still attached */ + err = bpf_link__unpin(link); + if (CHECK(err, "link_unpin", "err: %d\n", err)) + goto cleanup; + + /* still active, as we have FD open now */ + bss->in = 4; + usleep(1); + CHECK(bss->out != 4, "res_check4", "exp %d, got %d\n", 4, bss->out); + + bpf_link__destroy(link); + link = NULL; + + /* Validate it's finally detached. 
+ * Actual detachment might get delayed a bit, so there is no reliable + * way to validate it immediately here, let's count up for long enough + * and see if eventually output stops being updated + */ + for (i = 5; i < 10000; i++) { + bss->in = i; + usleep(1); + if (bss->out == i - 1) + break; + } + CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i); + +cleanup: + bpf_link__destroy(link); +} + +void test_link_pinning(void) +{ + struct test_link_pinning* skel; + + skel = test_link_pinning__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + if (test__start_subtest("pin_raw_tp")) + test_link_pinning_subtest(skel->progs.raw_tp_prog, skel->bss); + if (test__start_subtest("pin_tp_btf")) + test_link_pinning_subtest(skel->progs.tp_btf_prog, skel->bss); + + test_link_pinning__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c new file mode 100644 index 000000000..cad664546 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <sys/syscall.h> +#include "linked_funcs.skel.h" + +void test_linked_funcs(void) +{ + int err; + struct linked_funcs *skel; + + skel = linked_funcs__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and + * are set to not autoload by default + */ + bpf_program__set_autoload(skel->progs.handler1, true); + bpf_program__set_autoload(skel->progs.handler2, true); + + skel->rodata->my_tid = syscall(SYS_gettid); + skel->bss->syscall_id = SYS_getpgid; + + err = linked_funcs__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + err = linked_funcs__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger */ + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->output_val1, 2000 + 2000, "output_val1"); + ASSERT_EQ(skel->bss->output_ctx1, SYS_getpgid, "output_ctx1"); + ASSERT_EQ(skel->bss->output_weak1, 42, "output_weak1"); + + ASSERT_EQ(skel->bss->output_val2, 2 * 1000 + 2 * (2 * 1000), "output_val2"); + ASSERT_EQ(skel->bss->output_ctx2, SYS_getpgid, "output_ctx2"); + /* output_weak2 should never be updated */ + ASSERT_EQ(skel->bss->output_weak2, 0, "output_weak2"); + +cleanup: + linked_funcs__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/linked_maps.c b/tools/testing/selftests/bpf/prog_tests/linked_maps.c new file mode 100644 index 000000000..85dcaaaf2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/linked_maps.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <sys/syscall.h> +#include "linked_maps.skel.h" + +void test_linked_maps(void) +{ + int err; + struct linked_maps *skel; + + skel = linked_maps__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + err = linked_maps__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger */ + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->output_first1, 2000, "output_first1"); + ASSERT_EQ(skel->bss->output_second1, 2, "output_second1"); + ASSERT_EQ(skel->bss->output_weak1, 2, "output_weak1"); + +cleanup: + linked_maps__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/linked_vars.c b/tools/testing/selftests/bpf/prog_tests/linked_vars.c new file mode 100644 index 
000000000..267166abe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/linked_vars.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <sys/syscall.h> +#include "linked_vars.skel.h" + +void test_linked_vars(void) +{ + int err; + struct linked_vars *skel; + + skel = linked_vars__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->input_bss1 = 1000; + skel->bss->input_bss2 = 2000; + skel->bss->input_bss_weak = 3000; + + err = linked_vars__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + err = linked_vars__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger */ + syscall(SYS_getpgid); + + ASSERT_EQ(skel->bss->output_bss1, 1000 + 2000 + 3000, "output_bss1"); + ASSERT_EQ(skel->bss->output_bss2, 1000 + 2000 + 3000, "output_bss2"); + /* 10 comes from "winner" input_data_weak in first obj file */ + ASSERT_EQ(skel->bss->output_data1, 1 + 2 + 10, "output_bss1"); + ASSERT_EQ(skel->bss->output_data2, 1 + 2 + 10, "output_bss2"); + /* 100 comes from "winner" input_rodata_weak in first obj file */ + ASSERT_EQ(skel->bss->output_rodata1, 11 + 22 + 100, "output_weak1"); + ASSERT_EQ(skel->bss->output_rodata2, 11 + 22 + 100, "output_weak2"); + +cleanup: + linked_vars__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c new file mode 100644 index 000000000..581c0eb0a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. + */ + +#include <test_progs.h> +#include <network_helpers.h> + +void test_load_bytes_relative(void) +{ + int server_fd, cgroup_fd, prog_fd, map_fd, client_fd; + int err; + struct bpf_object *obj; + struct bpf_program *prog; + struct bpf_map *test_result; + __u32 duration = 0; + + __u32 map_key = 0; + __u32 map_value = 0; + + cgroup_fd = test__join_cgroup("/load_bytes_relative"); + if (CHECK_FAIL(cgroup_fd < 0)) + return; + + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (CHECK_FAIL(server_fd < 0)) + goto close_cgroup_fd; + + err = bpf_prog_test_load("./load_bytes_relative.bpf.o", BPF_PROG_TYPE_CGROUP_SKB, + &obj, &prog_fd); + if (CHECK_FAIL(err)) + goto close_server_fd; + + test_result = bpf_object__find_map_by_name(obj, "test_result"); + if (CHECK_FAIL(!test_result)) + goto close_bpf_object; + + map_fd = bpf_map__fd(test_result); + if (map_fd < 0) + goto close_bpf_object; + + prog = bpf_object__find_program_by_name(obj, "load_bytes_relative"); + if (CHECK_FAIL(!prog)) + goto close_bpf_object; + + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, + BPF_F_ALLOW_MULTI); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + client_fd = connect_to_fd(server_fd, 0); + if (CHECK_FAIL(client_fd < 0)) + goto close_bpf_object; + close(client_fd); + + err = bpf_map_lookup_elem(map_fd, &map_key, &map_value); + if (CHECK_FAIL(err)) + goto close_bpf_object; + + CHECK(map_value != 1, "bpf", "bpf program returned failure"); + +close_bpf_object: + bpf_object__close(obj); + +close_server_fd: + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c new file mode 100644 index 000000000..fe9a23e65 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -0,0 +1,276 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_buf.skel.h" + +static size_t libbpf_log_pos; +static char libbpf_log_buf[1024 * 1024]; +static bool libbpf_log_error; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args) +{ + int emitted_cnt; + size_t left_cnt; + + left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos; + emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args); + + if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) { + libbpf_log_error = true; + return 0; + } + + libbpf_log_pos += emitted_cnt; + return 0; +} + +static void obj_load_log_buf(void) +{ + libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb); + LIBBPF_OPTS(bpf_object_open_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + struct test_log_buf* skel; + char *obj_log_buf, *good_log_buf, *bad_log_buf; + int err; + + obj_log_buf = malloc(3 * log_buf_sz); + if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf")) + return; + + good_log_buf = obj_log_buf + log_buf_sz; + bad_log_buf = obj_log_buf + 2 * log_buf_sz; + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 4; /* for BTF this will turn into 1 */ + + /* In the first round every prog has its own log_buf, so libbpf logs + * don't have program failure logs + */ + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set very verbose level for good_prog so we always get detailed logs */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 2); + + bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz); + /* log_level 0 with custom log_buf means that verbose logs are not + * requested if program load is successful, but libbpf should retry + * with log_level 1 on error and put program's verbose load log into + * custom log_buf + */ + bpf_program__set_log_level(skel->progs.bad_prog, 0); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* there should be no prog loading log because we specified per-prog log buf */ + ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log"); + ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), + "libbpf_log_not_empty"); + ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), + "good_log_verbose"); + ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), + "bad_log_not_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + + /* reset everything */ + test_log_buf__destroy(skel); + obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0'; + libbpf_log_buf[0] = '\0'; + libbpf_log_pos = 0; + libbpf_log_error = false; + + /* In the second round we let bad_prog's failure be logged through print callback */ + 
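+ /* With no object-level kernel_log_buf and kernel_log_level 1, the failing
+  * prog's verifier output should arrive only via the libbpf print callback;
+  * the object buffer and bad_prog's buffer are expected to stay empty, while
+  * good_prog's buffer only gets the short verifier summary line.
+  */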
opts.kernel_log_buf = NULL; /* let everything through into print callback */ + opts.kernel_log_size = 0; + opts.kernel_log_level = 1; + + skel = test_log_buf__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + /* set normal verbose level for good_prog to check log_level is taken into account */ + bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz); + bpf_program__set_log_level(skel->progs.good_prog, 1); + + err = test_log_buf__load(skel); + if (!ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + ASSERT_FALSE(libbpf_log_error, "libbpf_log_error"); + + /* this time prog loading error should be logged through print callback */ + ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"), + "libbpf_log_correct"); + ASSERT_STREQ(obj_log_buf, "", "obj_log__empty"); + ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "good_log_ok"); + ASSERT_STREQ(bad_log_buf, "", "bad_log_empty"); + + if (env.verbosity > VERBOSE_NONE) { + printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf); + printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf); + printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf); + printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf); + } + +cleanup: + free(obj_log_buf); + test_log_buf__destroy(skel); + libbpf_set_print(old_print_cb); +} + +static void bpf_prog_load_log_buf(void) +{ + const struct bpf_insn good_prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn); + const struct bpf_insn bad_prog_insns[] = { + BPF_EXIT_INSN(), + }; + size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + char *log_buf; + int fd = -1; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + return; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0 log_buf shoud stay empty for good prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good prog */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", + good_prog_insns, good_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 0 should fill log_buf for bad prog */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL", + bad_prog_insns, bad_prog_insn_cnt, &opts); + ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + + free(log_buf); +} + +static void bpf_btf_load_log_buf(void) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + const size_t log_buf_sz = 1024 * 1024; + const void *raw_btf_data; + __u32 raw_btf_size; + struct btf *btf; + char *log_buf = NULL; + int fd = -1; + + btf = btf__new_empty(); + if 
(!ASSERT_OK_PTR(btf, "empty_btf")) + return; + + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + goto cleanup; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + goto cleanup; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + /* with log_level == 0 log_buf shoud stay empty for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + ASSERT_STREQ(log_buf, "", "good_log_0"); + ASSERT_GE(fd, 0, "good_fd1"); + if (fd >= 0) + close(fd); + fd = -1; + + /* log_level == 2 should always fill log_buf, even for good BTF */ + log_buf[0] = '\0'; + opts.log_level = 2; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2"); + ASSERT_GE(fd, 0, "good_fd2"); + if (fd >= 0) + close(fd); + fd = -1; + + /* make BTF bad, add pointer pointing to non-existing type */ + ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad")) + goto cleanup; + + /* log_level == 0 should fill log_buf for bad BTF */ + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts); + printf("LOG_BUF: %s\n", log_buf); + ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0"); + ASSERT_LT(fd, 0, "bad_fd"); + if (fd >= 0) + close(fd); + fd = -1; + +cleanup: + free(log_buf); + btf__free(btf); +} + +void test_log_buf(void) +{ + if (test__start_subtest("obj_load_log_buf")) + obj_load_log_buf(); + if (test__start_subtest("bpf_prog_load_log_buf")) + bpf_prog_load_log_buf(); + if (test__start_subtest("bpf_btf_load_log_buf")) + bpf_btf_load_log_buf(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c new file mode 100644 index 000000000..f4ffdcabf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_fixup.skel.h" + +enum trunc_type { + TRUNC_NONE, + TRUNC_PARTIAL, + TRUNC_FULL, +}; + +static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo, true); + memset(log_buf, 0, sizeof(log_buf)); + bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + "0: <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_sz> ", + "log_buf_part1"); + + switch (trunc_type) { + case TRUNC_NONE: + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + ASSERT_HAS_SUBSTR(log_buf, + "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "log_buf_end"); + break; + case TRUNC_PARTIAL: + /* we should get full libbpf message patch */ + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + case TRUNC_FULL: + /* we shouldn't get second part of libbpf message patch */ + ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"), + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + } + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); +cleanup: + test_log_fixup__destroy(skel); +} + +static void bad_core_relo_subprog(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo_subprog, true); + bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + ": <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_off> ", + "log_buf"); + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +static void missing_map(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_map__set_autocreate(skel->maps.missing_map, false); + + bpf_program__set_autoload(skel->progs.use_missing_map, true); + bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate"); + ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate"); + + ASSERT_HAS_SUBSTR(log_buf, + "8: <invalid BPF map reference>\n" + 
"BPF map 'missing_map' is referenced but wasn't created\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +void test_log_fixup(void) +{ + if (test__start_subtest("bad_core_relo_trunc_none")) + bad_core_relo(0, TRUNC_NONE /* full buf */); + if (test__start_subtest("bad_core_relo_trunc_partial")) + bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */); + if (test__start_subtest("bad_core_relo_trunc_full")) + bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */); + if (test__start_subtest("bad_core_relo_subprog")) + bad_core_relo_subprog(); + if (test__start_subtest("missing_map")) + missing_map(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c new file mode 100644 index 000000000..a767bb4a2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <test_progs.h> +#include "test_lookup_and_delete.skel.h" + +#define START_VALUE 1234 +#define NEW_VALUE 4321 +#define MAX_ENTRIES 2 + +static int duration; +static int nr_cpus; + +static int fill_values(int map_fd) +{ + __u64 key, value = START_VALUE; + int err; + + for (key = 1; key < MAX_ENTRIES + 1; key++) { + err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + return -1; + } + + return 0; +} + +static int fill_values_percpu(int map_fd) +{ + __u64 key, value[nr_cpus]; + int i, err; + + for (i = 0; i < nr_cpus; i++) + value[i] = START_VALUE; + + for (key = 1; key < MAX_ENTRIES + 1; key++) { + err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + return -1; + } + + return 0; +} + +static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type, + int *map_fd) +{ + struct test_lookup_and_delete *skel; + int err; + + skel = test_lookup_and_delete__open(); + if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open")) + return NULL; + + err = bpf_map__set_type(skel->maps.hash_map, map_type); + if (!ASSERT_OK(err, "bpf_map__set_type")) + goto cleanup; + + err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES); + if (!ASSERT_OK(err, "bpf_map__set_max_entries")) + goto cleanup; + + err = test_lookup_and_delete__load(skel); + if (!ASSERT_OK(err, "test_lookup_and_delete__load")) + goto cleanup; + + *map_fd = bpf_map__fd(skel->maps.hash_map); + if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd")) + goto cleanup; + + return skel; + +cleanup: + test_lookup_and_delete__destroy(skel); + return NULL; +} + +/* Triggers BPF program that updates map with given key and value */ +static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key, + __u64 value) +{ + int err; + + skel->bss->set_pid = getpid(); + skel->bss->set_key = key; + skel->bss->set_value = value; + + err = test_lookup_and_delete__attach(skel); + if (!ASSERT_OK(err, "test_lookup_and_delete__attach")) + return -1; + + syscall(__NR_getpgid); + + test_lookup_and_delete__detach(skel); + + return 0; +} + +static void test_lookup_and_delete_hash(void) +{ + struct test_lookup_and_delete *skel; + __u64 key, value; + int map_fd, err; + + /* Setup program and fill the map. 
*/ + skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd); + if (!ASSERT_OK_PTR(skel, "setup_prog")) + return; + + err = fill_values(map_fd); + if (!ASSERT_OK(err, "fill_values")) + goto cleanup; + + /* Lookup and delete element. */ + key = 1; + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) + goto cleanup; + + /* Fetched value should match the initially set value. */ + if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem", + "unexpected value=%lld\n", value)) + goto cleanup; + + /* Check that the entry is non existent. */ + err = bpf_map_lookup_elem(map_fd, &key, &value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + +cleanup: + test_lookup_and_delete__destroy(skel); +} + +static void test_lookup_and_delete_percpu_hash(void) +{ + struct test_lookup_and_delete *skel; + __u64 key, val, value[nr_cpus]; + int map_fd, err, i; + + /* Setup program and fill the map. */ + skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd); + if (!ASSERT_OK_PTR(skel, "setup_prog")) + return; + + err = fill_values_percpu(map_fd); + if (!ASSERT_OK(err, "fill_values_percpu")) + goto cleanup; + + /* Lookup and delete element. */ + key = 1; + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) + goto cleanup; + + for (i = 0; i < nr_cpus; i++) { + val = value[i]; + + /* Fetched value should match the initially set value. */ + if (CHECK(val != START_VALUE, "map value", + "unexpected for cpu %d: %lld\n", i, val)) + goto cleanup; + } + + /* Check that the entry is non existent. */ + err = bpf_map_lookup_elem(map_fd, &key, value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + +cleanup: + test_lookup_and_delete__destroy(skel); +} + +static void test_lookup_and_delete_lru_hash(void) +{ + struct test_lookup_and_delete *skel; + __u64 key, value; + int map_fd, err; + + /* Setup program and fill the LRU map. */ + skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd); + if (!ASSERT_OK_PTR(skel, "setup_prog")) + return; + + err = fill_values(map_fd); + if (!ASSERT_OK(err, "fill_values")) + goto cleanup; + + /* Insert new element at key=3, should reuse LRU element. */ + key = 3; + err = trigger_tp(skel, key, NEW_VALUE); + if (!ASSERT_OK(err, "trigger_tp")) + goto cleanup; + + /* Lookup and delete element 3. */ + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) + goto cleanup; + + /* Value should match the new value. */ + if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem", + "unexpected value=%lld\n", value)) + goto cleanup; + + /* Check that entries 3 and 1 are non existent. */ + err = bpf_map_lookup_elem(map_fd, &key, &value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + + key = 1; + err = bpf_map_lookup_elem(map_fd, &key, &value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + +cleanup: + test_lookup_and_delete__destroy(skel); +} + +static void test_lookup_and_delete_lru_percpu_hash(void) +{ + struct test_lookup_and_delete *skel; + __u64 key, val, value[nr_cpus]; + int map_fd, err, i, cpucnt = 0; + + /* Setup program and fill the LRU map. 
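+ * Key 3 is inserted through the tracepoint program (reusing an LRU slot),
+ * so after the lookup-and-delete below exactly one CPU's slot should hold
+ * NEW_VALUE and neither key 3 nor key 1 should remain in the map.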
*/ + skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd); + if (!ASSERT_OK_PTR(skel, "setup_prog")) + return; + + err = fill_values_percpu(map_fd); + if (!ASSERT_OK(err, "fill_values_percpu")) + goto cleanup; + + /* Insert new element at key=3, should reuse LRU element 1. */ + key = 3; + err = trigger_tp(skel, key, NEW_VALUE); + if (!ASSERT_OK(err, "trigger_tp")) + goto cleanup; + + /* Clean value. */ + for (i = 0; i < nr_cpus; i++) + value[i] = 0; + + /* Lookup and delete element 3. */ + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) + goto cleanup; + + /* Check if only one CPU has set the value. */ + for (i = 0; i < nr_cpus; i++) { + val = value[i]; + if (val) { + if (CHECK(val != NEW_VALUE, "map value", + "unexpected for cpu %d: %lld\n", i, val)) + goto cleanup; + cpucnt++; + } + } + if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n", + cpucnt)) + goto cleanup; + + /* Check that entries 3 and 1 are non existent. */ + err = bpf_map_lookup_elem(map_fd, &key, &value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + + key = 1; + err = bpf_map_lookup_elem(map_fd, &key, &value); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem")) + goto cleanup; + +cleanup: + test_lookup_and_delete__destroy(skel); +} + +void test_lookup_and_delete(void) +{ + nr_cpus = bpf_num_possible_cpus(); + + if (test__start_subtest("lookup_and_delete")) + test_lookup_and_delete_hash(); + if (test__start_subtest("lookup_and_delete_percpu")) + test_lookup_and_delete_percpu_hash(); + if (test__start_subtest("lookup_and_delete_lru")) + test_lookup_and_delete_lru_hash(); + if (test__start_subtest("lookup_and_delete_lru_percpu")) + test_lookup_and_delete_lru_percpu_hash(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_key.c b/tools/testing/selftests/bpf/prog_tests/lookup_key.c new file mode 100644 index 000000000..68025e88f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lookup_key.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <linux/keyctl.h> +#include <test_progs.h> + +#include "test_lookup_key.skel.h" + +#define KEY_LOOKUP_CREATE 0x01 +#define KEY_LOOKUP_PARTIAL 0x02 + +static bool kfunc_not_supported; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, + va_list args) +{ + char *func; + + if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n")) + return 0; + + func = va_arg(args, char *); + + if (strcmp(func, "bpf_lookup_user_key") && strcmp(func, "bpf_key_put") && + strcmp(func, "bpf_lookup_system_key")) + return 0; + + kfunc_not_supported = true; + return 0; +} + +void test_lookup_key(void) +{ + libbpf_print_fn_t old_print_cb; + struct test_lookup_key *skel; + __u32 next_id; + int ret; + + skel = test_lookup_key__open(); + if (!ASSERT_OK_PTR(skel, "test_lookup_key__open")) + return; + + old_print_cb = libbpf_set_print(libbpf_print_cb); + ret = test_lookup_key__load(skel); + libbpf_set_print(old_print_cb); + + if (ret < 0 && kfunc_not_supported) { + printf("%s:SKIP:bpf_lookup_*_key(), bpf_key_put() kfuncs not supported\n", + __func__); + test__skip(); + goto close_prog; + } + + if (!ASSERT_OK(ret, "test_lookup_key__load")) + goto close_prog; + + ret = test_lookup_key__attach(skel); + if (!ASSERT_OK(ret, "test_lookup_key__attach")) + goto 
close_prog; + + skel->bss->monitored_pid = getpid(); + skel->bss->key_serial = KEY_SPEC_THREAD_KEYRING; + + /* The thread-specific keyring does not exist, this test fails. */ + skel->bss->flags = 0; + + ret = bpf_prog_get_next_id(0, &next_id); + if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id")) + goto close_prog; + + /* Force creation of the thread-specific keyring, this test succeeds. */ + skel->bss->flags = KEY_LOOKUP_CREATE; + + ret = bpf_prog_get_next_id(0, &next_id); + if (!ASSERT_OK(ret, "bpf_prog_get_next_id")) + goto close_prog; + + /* Pass both lookup flags for parameter validation. */ + skel->bss->flags = KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL; + + ret = bpf_prog_get_next_id(0, &next_id); + if (!ASSERT_OK(ret, "bpf_prog_get_next_id")) + goto close_prog; + + /* Pass invalid flags. */ + skel->bss->flags = UINT64_MAX; + + ret = bpf_prog_get_next_id(0, &next_id); + if (!ASSERT_LT(ret, 0, "bpf_prog_get_next_id")) + goto close_prog; + + skel->bss->key_serial = 0; + skel->bss->key_id = 1; + + ret = bpf_prog_get_next_id(0, &next_id); + if (!ASSERT_OK(ret, "bpf_prog_get_next_id")) + goto close_prog; + + skel->bss->key_id = UINT32_MAX; + + ret = bpf_prog_get_next_id(0, &next_id); + ASSERT_LT(ret, 0, "bpf_prog_get_next_id"); + +close_prog: + skel->bss->monitored_pid = 0; + test_lookup_key__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lru_bug.c b/tools/testing/selftests/bpf/prog_tests/lru_bug.c new file mode 100644 index 000000000..3c7822390 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lru_bug.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#include "lru_bug.skel.h" + +void test_lru_bug(void) +{ + struct lru_bug *skel; + int ret; + + skel = lru_bug__open_and_load(); + if (!ASSERT_OK_PTR(skel, "lru_bug__open_and_load")) + return; + ret = lru_bug__attach(skel); + if (!ASSERT_OK(ret, "lru_bug__attach")) + goto end; + usleep(1); + ASSERT_OK(skel->data->result, "prealloc_lru_pop doesn't call check_and_init_map_value"); +end: + lru_bug__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c new file mode 100644 index 000000000..f117bfef6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/types.h> +#include <sys/socket.h> +#include <test_progs.h> +#include <bpf/btf.h> + +#include "lsm_cgroup.skel.h" +#include "lsm_cgroup_nonvoid.skel.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" + +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + +static struct btf *btf; + +static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func) +{ + LIBBPF_OPTS(bpf_prog_query_opts, p); + int cnt = 0; + int i; + + ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query"); + + if (!attach_func) + return p.prog_cnt; + + /* When attach_func is provided, count the number of progs that + * attach to the given symbol. 
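+ * Each returned prog id is resolved to an fd, and its attach_btf_id is
+ * compared against the BTF FUNC id of attach_func.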
+ */ + + if (!btf) + btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux")) + return -1; + + p.prog_ids = malloc(sizeof(u32) * p.prog_cnt); + p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt); + ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query"); + + for (i = 0; i < p.prog_cnt; i++) { + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int fd; + + fd = bpf_prog_get_fd_by_id(p.prog_ids[i]); + ASSERT_GE(fd, 0, "prog_get_fd_by_id"); + ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd"); + close(fd); + + if (info.attach_btf_id == + btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC)) + cnt++; + } + + free(p.prog_ids); + free(p.prog_attach_flags); + + return cnt; +} + +static void test_lsm_cgroup_functional(void) +{ + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1; + int listen_fd, client_fd, accepted_fd; + struct lsm_cgroup *skel = NULL; + int post_create_prog_fd2 = -1; + int post_create_prog_fd = -1; + int bind_link_fd2 = -1; + int bind_prog_fd2 = -1; + int alloc_prog_fd = -1; + int bind_prog_fd = -1; + int bind_link_fd = -1; + int clone_prog_fd = -1; + int err, fd, prio; + socklen_t socklen; + + cgroup_fd3 = test__join_cgroup("/sock_policy_empty"); + if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup")) + goto close_cgroup; + + cgroup_fd2 = test__join_cgroup("/sock_policy_reuse"); + if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse")) + goto close_cgroup; + + cgroup_fd = test__join_cgroup("/sock_policy"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + goto close_cgroup; + + skel = lsm_cgroup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto close_cgroup; + + post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create); + post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2); + bind_prog_fd = bpf_program__fd(skel->progs.socket_bind); + bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2); + alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc); + clone_prog_fd = bpf_program__fd(skel->progs.socket_clone); + + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count"); + err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0); + if (err == -ENOTSUPP) { + test__skip(); + goto close_cgroup; + } + if (!ASSERT_OK(err, "attach alloc_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count"); + + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count"); + err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0); + if (!ASSERT_OK(err, "attach clone_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count"); + + /* Make sure replacing works. 
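+ * socket_post_create2 replaces socket_post_create through
+ * attach_opts.replace_prog_fd, so the per-hook and total program counts
+ * must not change.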
*/ + + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count"); + err = bpf_prog_attach(post_create_prog_fd, cgroup_fd, + BPF_LSM_CGROUP, 0); + if (!ASSERT_OK(err, "attach post_create_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count"); + + attach_opts.replace_prog_fd = post_create_prog_fd; + err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd, + BPF_LSM_CGROUP, &attach_opts); + if (!ASSERT_OK(err, "prog replace post_create_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count"); + + /* Try the same attach/replace via link API. */ + + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count"); + bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd, + BPF_LSM_CGROUP, NULL); + if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count"); + + update_opts.old_prog_fd = bind_prog_fd; + update_opts.flags = BPF_F_REPLACE; + + err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts); + if (!ASSERT_OK(err, "link update bind_prog_fd")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count"); + + /* Attach another instance of bind program to another cgroup. + * This should trigger the reuse of the trampoline shim (two + * programs attaching to the same btf_id). + */ + + ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count"); + bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2, + BPF_LSM_CGROUP, NULL); + if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2")) + goto detach_cgroup; + ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count"); + ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count"); + + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK)) + /* AF_UNIX is prohibited. */ + ASSERT_LT(fd, 0, "socket(AF_UNIX)"); + close(fd); + + /* AF_INET6 gets default policy (sk_priority). */ + + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)")) + goto detach_cgroup; + + prio = 0; + socklen = sizeof(prio); + ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0, + "getsockopt"); + ASSERT_EQ(prio, 123, "sk_priority"); + + close(fd); + + /* TX-only AF_PACKET is allowed. */ + + ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0, + "socket(AF_PACKET, ..., ETH_P_ALL)"); + + fd = socket(AF_PACKET, SOCK_RAW, 0); + ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)"); + + /* TX-only AF_PACKET can not be rebound. */ + + struct sockaddr_ll sa = { + .sll_family = AF_PACKET, + .sll_protocol = htons(ETH_P_ALL), + }; + ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0, + "bind(ETH_P_ALL)"); + + close(fd); + + /* Trigger passive open. 
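+ * start_server()/connect_to_fd()/accept() below create listener, client
+ * and accepted sockets; the accepted socket's SO_PRIORITY is expected to
+ * read back as 234.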
*/ + + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + ASSERT_GE(listen_fd, 0, "start_server"); + client_fd = connect_to_fd(listen_fd, 0); + ASSERT_GE(client_fd, 0, "connect_to_fd"); + accepted_fd = accept(listen_fd, NULL, NULL); + ASSERT_GE(accepted_fd, 0, "accept"); + + prio = 0; + socklen = sizeof(prio); + ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0, + "getsockopt"); + ASSERT_EQ(prio, 234, "sk_priority"); + + /* These are replaced and never called. */ + ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create"); + ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind"); + + /* AF_INET6+SOCK_STREAM + * AF_PACKET+SOCK_RAW + * AF_UNIX+SOCK_RAW if already have non-bpf lsms installed + * listen_fd + * client_fd + * accepted_fd + */ + if (skel->kconfig->CONFIG_SECURITY_APPARMOR + || skel->kconfig->CONFIG_SECURITY_SELINUX + || skel->kconfig->CONFIG_SECURITY_SMACK) + /* AF_UNIX+SOCK_RAW if already have non-bpf lsms installed */ + ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2"); + else + ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2"); + + /* start_server + * bind(ETH_P_ALL) + */ + ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2"); + /* Single accept(). */ + ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone"); + + /* AF_UNIX+SOCK_STREAM (failed) + * AF_INET6+SOCK_STREAM + * AF_PACKET+SOCK_RAW (failed) + * AF_PACKET+SOCK_RAW + * listen_fd + * client_fd + * accepted_fd + */ + ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc"); + + close(listen_fd); + close(client_fd); + close(accepted_fd); + + /* Make sure other cgroup doesn't trigger the programs. */ + + if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join root cgroup")) + goto detach_cgroup; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)")) + goto detach_cgroup; + + prio = 0; + socklen = sizeof(prio); + ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0, + "getsockopt"); + ASSERT_EQ(prio, 0, "sk_priority"); + + close(fd); + +detach_cgroup: + ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd, + BPF_LSM_CGROUP), 0, "detach_create"); + close(bind_link_fd); + /* Don't close bind_link_fd2, exercise cgroup release cleanup. 
*/ + ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd, + BPF_LSM_CGROUP), 0, "detach_alloc"); + ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd, + BPF_LSM_CGROUP), 0, "detach_clone"); + +close_cgroup: + close(cgroup_fd); + close(cgroup_fd2); + close(cgroup_fd3); + lsm_cgroup__destroy(skel); +} + +static void test_lsm_cgroup_nonvoid(void) +{ + struct lsm_cgroup_nonvoid *skel = NULL; + + skel = lsm_cgroup_nonvoid__open_and_load(); + ASSERT_NULL(skel, "open succeeds"); + lsm_cgroup_nonvoid__destroy(skel); +} + +void test_lsm_cgroup(void) +{ + if (test__start_subtest("functional")) + test_lsm_cgroup_functional(); + if (test__start_subtest("nonvoid")) + test_lsm_cgroup_nonvoid(); + btf__free(btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c new file mode 100644 index 000000000..14a31109d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_init.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */ + +#include <test_progs.h> +#include "test_map_init.skel.h" + +#define TEST_VALUE 0x1234 +#define FILL_VALUE 0xdeadbeef + +static int nr_cpus; +static int duration; + +typedef unsigned long long map_key_t; +typedef unsigned long long map_value_t; +typedef struct { + map_value_t v; /* padding */ +} __bpf_percpu_val_align pcpu_map_value_t; + + +static int map_populate(int map_fd, int num) +{ + pcpu_map_value_t value[nr_cpus]; + int i, err; + map_key_t key; + + for (i = 0; i < nr_cpus; i++) + bpf_percpu(value, i) = FILL_VALUE; + + for (key = 1; key <= num; key++) { + err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + return -1; + } + + return 0; +} + +static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz, + int *map_fd, int populate) +{ + struct test_map_init *skel; + int err; + + skel = test_map_init__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + err = bpf_map__set_type(skel->maps.hashmap1, map_type); + if (!ASSERT_OK(err, "bpf_map__set_type")) + goto error; + + err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz); + if (!ASSERT_OK(err, "bpf_map__set_max_entries")) + goto error; + + err = test_map_init__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto error; + + *map_fd = bpf_map__fd(skel->maps.hashmap1); + if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n")) + goto error; + + err = map_populate(*map_fd, populate); + if (!ASSERT_OK(err, "map_populate")) + goto error_map; + + return skel; + +error_map: + close(*map_fd); +error: + test_map_init__destroy(skel); + return NULL; +} + +/* executes bpf program that updates map with key, value */ +static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key, + map_value_t value) +{ + struct test_map_init__bss *bss; + + bss = skel->bss; + + bss->inKey = key; + bss->inValue = value; + bss->inPid = getpid(); + + if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach")) + return -1; + + /* Let tracepoint trigger */ + syscall(__NR_getpgid); + + test_map_init__detach(skel); + + return 0; +} + +static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected) +{ + int i, nzCnt = 0; + map_value_t val; + + for (i = 0; i < nr_cpus; i++) { + val = bpf_percpu(value, i); + if (val) { + if (CHECK(val != expected, "map value", + "unexpected for cpu %d: 0x%llx\n", i, val)) + return -1; + nzCnt++; + } + } + + if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n", + 
nzCnt)) + return -1; + + return 0; +} + +/* Add key=1 elem with values set for all CPUs + * Delete elem key=1 + * Run bpf prog that inserts new key=1 elem with value=0x1234 + * (bpf prog can only set value for current CPU) + * Lookup Key=1 and check value is as expected for all CPUs: + * value set by bpf prog for one CPU, 0 for all others + */ +static void test_pcpu_map_init(void) +{ + pcpu_map_value_t value[nr_cpus]; + struct test_map_init *skel; + int map_fd, err; + map_key_t key; + + /* max 1 elem in map so insertion is forced to reuse freed entry */ + skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1); + if (!ASSERT_OK_PTR(skel, "prog_setup")) + return; + + /* delete element so the entry can be re-used*/ + key = 1; + err = bpf_map_delete_elem(map_fd, &key); + if (!ASSERT_OK(err, "bpf_map_delete_elem")) + goto cleanup; + + /* run bpf prog that inserts new elem, re-using the slot just freed */ + err = prog_run_insert_elem(skel, key, TEST_VALUE); + if (!ASSERT_OK(err, "prog_run_insert_elem")) + goto cleanup; + + /* check that key=1 was re-created by bpf prog */ + err = bpf_map_lookup_elem(map_fd, &key, value); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + goto cleanup; + + /* and has expected values */ + check_values_one_cpu(value, TEST_VALUE); + +cleanup: + test_map_init__destroy(skel); +} + +/* Add key=1 and key=2 elems with values set for all CPUs + * Run bpf prog that inserts new key=3 elem + * (only for current cpu; other cpus should have initial value = 0) + * Lookup Key=1 and check value is as expected for all CPUs + */ +static void test_pcpu_lru_map_init(void) +{ + pcpu_map_value_t value[nr_cpus]; + struct test_map_init *skel; + int map_fd, err; + map_key_t key; + + /* Set up LRU map with 2 elements, values filled for all CPUs. + * With these 2 elements, the LRU map is full + */ + skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2); + if (!ASSERT_OK_PTR(skel, "prog_setup")) + return; + + /* run bpf prog that inserts new key=3 element, re-using LRU slot */ + key = 3; + err = prog_run_insert_elem(skel, key, TEST_VALUE); + if (!ASSERT_OK(err, "prog_run_insert_elem")) + goto cleanup; + + /* check that key=3 replaced one of earlier elements */ + err = bpf_map_lookup_elem(map_fd, &key, value); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + goto cleanup; + + /* and has expected values */ + check_values_one_cpu(value, TEST_VALUE); + +cleanup: + test_map_init__destroy(skel); +} + +void test_map_init(void) +{ + nr_cpus = bpf_num_possible_cpus(); + if (nr_cpus <= 1) { + printf("%s:SKIP: >1 cpu needed for this test\n", __func__); + test__skip(); + return; + } + + if (test__start_subtest("pcpu_map_init")) + test_pcpu_map_init(); + if (test__start_subtest("pcpu_lru_map_init")) + test_pcpu_lru_map_init(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c new file mode 100644 index 000000000..0d66b1524 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_kptr.skel.h" +#include "map_kptr_fail.skel.h" + +static char log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} map_kptr_fail_tests[] = { + { "size_not_bpf_dw", "kptr access size must be BPF_DW" }, + { "non_const_var_off", "kptr access cannot have variable offset" }, + { "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. 
kptr has to be" }, + { "misaligned_access_write", "kptr access misaligned expected=8 off=7" }, + { "misaligned_access_read", "kptr access misaligned expected=8 off=1" }, + { "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" }, + { "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" }, + { "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" }, + { "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" }, + { "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" }, + { "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" }, + { "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" }, + { "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" }, + { "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" }, + { "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" }, + { "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" }, + { "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" }, + { "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" }, + { "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" }, + { "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" }, + { "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" }, +}; + +static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct map_kptr_fail *skel; + struct bpf_program *prog; + int ret; + + skel = map_kptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = map_kptr_fail__load(skel); + if (!ASSERT_ERR(ret, "map_kptr__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + map_kptr_fail__destroy(skel); +} + +static void test_map_kptr_fail(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) { + if (!test__start_subtest(map_kptr_fail_tests[i].prog_name)) + continue; + test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name, + map_kptr_fail_tests[i].err_msg); + } +} + +static void test_map_kptr_success(bool test_run) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct map_kptr *skel; + int key = 0, ret; + char buf[16]; + + skel = map_kptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts); + ASSERT_OK(ret, "test_map_kptr_ref refcount"); + 
ASSERT_OK(opts.retval, "test_map_kptr_ref retval"); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts); + ASSERT_OK(ret, "test_map_kptr_ref2 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); + + if (test_run) + goto exit; + + ret = bpf_map__update_elem(skel->maps.array_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "array_map update"); + ret = bpf_map__update_elem(skel->maps.array_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "array_map update2"); + + ret = bpf_map__update_elem(skel->maps.hash_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "hash_map update"); + ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "hash_map delete"); + + ret = bpf_map__update_elem(skel->maps.hash_malloc_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "hash_malloc_map update"); + ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "hash_malloc_map delete"); + + ret = bpf_map__update_elem(skel->maps.lru_hash_map, + &key, sizeof(key), buf, sizeof(buf), 0); + ASSERT_OK(ret, "lru_hash_map update"); + ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "lru_hash_map delete"); + +exit: + map_kptr__destroy(skel); +} + +void test_map_kptr(void) +{ + if (test__start_subtest("success")) { + test_map_kptr_success(false); + /* Do test_run twice, so that we see refcount going back to 1 + * after we leave it in map from first iteration. + */ + test_map_kptr_success(true); + } + test_map_kptr_fail(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c new file mode 100644 index 000000000..1d6726f01 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run_opts err"); + ASSERT_OK(topts.retval, "test_run_opts retval"); + + pthread_exit(arg); +} + +static void *parallel_map_access(void *arg) +{ + int err, map_fd = *(u32 *) arg; + int vars[17], i, j, rnd, key = 0; + + for (i = 0; i < 10000; i++) { + err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK); + if (CHECK_FAIL(err)) { + printf("lookup failed\n"); + goto out; + } + if (CHECK_FAIL(vars[0] != 0)) { + printf("lookup #%d var[0]=%d\n", i, vars[0]); + goto out; + } + rnd = vars[1]; + for (j = 2; j < 17; j++) { + if (vars[j] == rnd) + continue; + printf("lookup #%d var[1]=%d var[%d]=%d\n", + i, rnd, j, vars[j]); + CHECK_FAIL(vars[j] != rnd); + goto out; + } + } +out: + pthread_exit(arg); +} + +void test_map_lock(void) +{ + const char *file = "./test_map_lock.bpf.o"; + int prog_fd, map_fd[2], vars[17] = {}; + pthread_t thread_id[6]; + struct bpf_object *obj = NULL; + int err = 0, key = 0, i; + void *ret; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + if (CHECK_FAIL(err)) { + printf("test_map_lock:bpf_prog_test_load errno %d\n", errno); + goto close_prog; + } + map_fd[0] = bpf_find_map(__func__, obj, "hash_map"); + if (CHECK_FAIL(map_fd[0] < 0)) + goto close_prog; + map_fd[1] = bpf_find_map(__func__, obj, "array_map"); + if 
(CHECK_FAIL(map_fd[1] < 0)) + goto close_prog; + + bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK); + + for (i = 0; i < 4; i++) + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd))) + goto close_prog; + for (i = 4; i < 6; i++) + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &parallel_map_access, + &map_fd[i - 4]))) + goto close_prog; + for (i = 0; i < 4; i++) + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&prog_fd)) + goto close_prog; + for (i = 4; i < 6; i++) + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&map_fd[i - 4])) + goto close_prog; +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c new file mode 100644 index 000000000..bfb1bf3fd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Bytedance */ + +#include <test_progs.h> +#include "test_map_lookup_percpu_elem.skel.h" + +void test_map_lookup_percpu_elem(void) +{ + struct test_map_lookup_percpu_elem *skel; + __u64 key = 0, sum; + int ret, i, nr_cpus = libbpf_num_possible_cpus(); + __u64 *buf; + + buf = malloc(nr_cpus*sizeof(__u64)); + if (!ASSERT_OK_PTR(buf, "malloc")) + return; + + for (i = 0; i < nr_cpus; i++) + buf[i] = i; + sum = (nr_cpus - 1) * nr_cpus / 2; + + skel = test_map_lookup_percpu_elem__open(); + if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open")) + goto exit; + + skel->rodata->my_pid = getpid(); + skel->rodata->nr_cpus = nr_cpus; + + ret = test_map_lookup_percpu_elem__load(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load")) + goto cleanup; + + ret = test_map_lookup_percpu_elem__attach(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach")) + goto cleanup; + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_array_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_hash_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_lru_hash_map update"); + + syscall(__NR_getuid); + + test_map_lookup_percpu_elem__detach(skel); + + ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem"); + +cleanup: + test_map_lookup_percpu_elem__destroy(skel); +exit: + free(buf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c new file mode 100644 index 000000000..43e502acf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_ptr_kern.lskel.h" + +void test_map_ptr(void) +{ + struct map_ptr_kern_lskel *skel; + char buf[128]; + int err; + int page_size = getpagesize(); + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + skel = map_ptr_kern_lskel__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + +
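/* Descriptive note (not part of the original patch): the ring buffer map's + * size depends on the page size of the running system, so it is set on the + * open, not-yet-loaded skeleton before map_ptr_kern_lskel__load() actually + * creates the map. + */ +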
skel->maps.m_ringbuf.max_entries = page_size; + + err = map_ptr_kern_lskel__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + skel->bss->page_size = page_size; + + err = bpf_prog_test_run_opts(skel->progs.cg_skb.prog_fd, &topts); + + if (!ASSERT_OK(err, "test_run")) + goto cleanup; + + if (!ASSERT_NEQ(topts.retval, 0, "test_run retval")) + goto cleanup; + +cleanup: + map_ptr_kern_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c new file mode 100644 index 000000000..2c53eade8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/metadata.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. + */ + +#include <test_progs.h> +#include <cgroup_helpers.h> +#include <network_helpers.h> + +#include "metadata_unused.skel.h" +#include "metadata_used.skel.h" + +static int duration; + +static int prog_holds_map(int prog_fd, int map_fd) +{ + struct bpf_prog_info prog_info = {}; + struct bpf_prog_info map_info = {}; + __u32 prog_info_len; + __u32 map_info_len; + __u32 *map_ids; + int nr_maps; + int ret; + int i; + + map_info_len = sizeof(map_info); + ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); + if (ret) + return -errno; + + prog_info_len = sizeof(prog_info); + ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + if (ret) + return -errno; + + map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32)); + if (!map_ids) + return -ENOMEM; + + nr_maps = prog_info.nr_map_ids; + memset(&prog_info, 0, sizeof(prog_info)); + prog_info.nr_map_ids = nr_maps; + prog_info.map_ids = ptr_to_u64(map_ids); + prog_info_len = sizeof(prog_info); + + ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len); + if (ret) { + ret = -errno; + goto free_map_ids; + } + + ret = -ENOENT; + for (i = 0; i < prog_info.nr_map_ids; i++) { + if (map_ids[i] == map_info.id) { + ret = 0; + break; + } + } + +free_map_ids: + free(map_ids); + return ret; +} + +static void test_metadata_unused(void) +{ + struct metadata_unused *obj; + int err; + + obj = metadata_unused__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + err = prog_holds_map(bpf_program__fd(obj->progs.prog), + bpf_map__fd(obj->maps.rodata)); + if (CHECK(err, "prog-holds-rodata", "errno: %d", err)) + return; + + /* Assert that we can access the metadata in skel and the values are + * what we expect. + */ + if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo", + sizeof(obj->rodata->bpf_metadata_a)), + "bpf_metadata_a", "expected \"foo\", value differ")) + goto close_bpf_object; + if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b", + "expected 1, got %d", obj->rodata->bpf_metadata_b)) + goto close_bpf_object; + + /* Assert that binding metadata map to prog again succeeds. */ + err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog), + bpf_map__fd(obj->maps.rodata), NULL); + CHECK(err, "rebind_map", "errno %d, expected 0", errno); + +close_bpf_object: + metadata_unused__destroy(obj); +} + +static void test_metadata_used(void) +{ + struct metadata_used *obj; + int err; + + obj = metadata_used__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + err = prog_holds_map(bpf_program__fd(obj->progs.prog), + bpf_map__fd(obj->maps.rodata)); + if (CHECK(err, "prog-holds-rodata", "errno: %d", err)) + return; + + /* Assert that we can access the metadata in skel and the values are + * what we expect. 
+ */ + if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar", + sizeof(obj->rodata->bpf_metadata_a)), + "metadata_a", "expected \"bar\", value differ")) + goto close_bpf_object; + if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b", + "expected 2, got %d", obj->rodata->bpf_metadata_b)) + goto close_bpf_object; + + /* Assert that binding metadata map to prog again succeeds. */ + err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog), + bpf_map__fd(obj->maps.rodata), NULL); + CHECK(err, "rebind_map", "errno %d, expected 0", errno); + +close_bpf_object: + metadata_used__destroy(obj); +} + +void test_metadata(void) +{ + if (test__start_subtest("unused")) + test_metadata_unused(); + + if (test__start_subtest("used")) + test_metadata_used(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c new file mode 100644 index 000000000..eb2feaac8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c @@ -0,0 +1,559 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Check if we can migrate child sockets. + * + * 1. call listen() for 4 server sockets. + * 2. call connect() for 25 client sockets. + * 3. call listen() for 1 server socket. (migration target) + * 4. update a map to migrate all child sockets + * to the last server socket (migrate_map[cookie] = 4) + * 5. call shutdown() for first 4 server sockets + * and migrate the requests in the accept queue + * to the last server socket. + * 6. call listen() for the second server socket. + * 7. call shutdown() for the last server + * and migrate the requests in the accept queue + * to the second server socket. + * 8. call listen() for the last server. + * 9. call shutdown() for the second server + * and migrate the requests in the accept queue + * to the last server socket. + * 10. call accept() for the last server socket. 
+ * + * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp> + */ + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "test_progs.h" +#include "test_migrate_reuseport.skel.h" +#include "network_helpers.h" + +#ifndef TCP_FASTOPEN_CONNECT +#define TCP_FASTOPEN_CONNECT 30 +#endif + +#define IFINDEX_LO 1 + +#define NR_SERVERS 5 +#define NR_CLIENTS (NR_SERVERS * 5) +#define MIGRATED_TO (NR_SERVERS - 1) + +/* fastopenq->max_qlen and sk->sk_max_ack_backlog */ +#define QLEN (NR_CLIENTS * 5) + +#define MSG "Hello World\0" +#define MSGLEN 12 + +static struct migrate_reuseport_test_case { + const char *name; + __s64 servers[NR_SERVERS]; + __s64 clients[NR_CLIENTS]; + struct sockaddr_storage addr; + socklen_t addrlen; + int family; + int state; + bool drop_ack; + bool expire_synack_timer; + bool fastopen; + struct bpf_link *link; +} test_cases[] = { + { + .name = "IPv4 TCP_ESTABLISHED inet_csk_listen_stop", + .family = AF_INET, + .state = BPF_TCP_ESTABLISHED, + .drop_ack = false, + .expire_synack_timer = false, + .fastopen = false, + }, + { + .name = "IPv4 TCP_SYN_RECV inet_csk_listen_stop", + .family = AF_INET, + .state = BPF_TCP_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = false, + .fastopen = true, + }, + { + .name = "IPv4 TCP_NEW_SYN_RECV reqsk_timer_handler", + .family = AF_INET, + .state = BPF_TCP_NEW_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = true, + .fastopen = false, + }, + { + .name = "IPv4 TCP_NEW_SYN_RECV inet_csk_complete_hashdance", + .family = AF_INET, + .state = BPF_TCP_NEW_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = false, + .fastopen = false, + }, + { + .name = "IPv6 TCP_ESTABLISHED inet_csk_listen_stop", + .family = AF_INET6, + .state = BPF_TCP_ESTABLISHED, + .drop_ack = false, + .expire_synack_timer = false, + .fastopen = false, + }, + { + .name = "IPv6 TCP_SYN_RECV inet_csk_listen_stop", + .family = AF_INET6, + .state = BPF_TCP_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = false, + .fastopen = true, + }, + { + .name = "IPv6 TCP_NEW_SYN_RECV reqsk_timer_handler", + .family = AF_INET6, + .state = BPF_TCP_NEW_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = true, + .fastopen = false, + }, + { + .name = "IPv6 TCP_NEW_SYN_RECV inet_csk_complete_hashdance", + .family = AF_INET6, + .state = BPF_TCP_NEW_SYN_RECV, + .drop_ack = true, + .expire_synack_timer = false, + .fastopen = false, + } +}; + +static void init_fds(__s64 fds[], int len) +{ + int i; + + for (i = 0; i < len; i++) + fds[i] = -1; +} + +static void close_fds(__s64 fds[], int len) +{ + int i; + + for (i = 0; i < len; i++) { + if (fds[i] != -1) { + close(fds[i]); + fds[i] = -1; + } + } +} + +static int setup_fastopen(char *buf, int size, int *saved_len, bool restore) +{ + int err = 0, fd, len; + + fd = open("/proc/sys/net/ipv4/tcp_fastopen", O_RDWR); + if (!ASSERT_NEQ(fd, -1, "open")) + return -1; + + if (restore) { + len = write(fd, buf, *saved_len); + if (!ASSERT_EQ(len, *saved_len, "write - restore")) + err = -1; + } else { + *saved_len = read(fd, buf, size); + if (!ASSERT_GE(*saved_len, 1, "read")) { + err = -1; + goto close; + } + + err = lseek(fd, 0, SEEK_SET); + if (!ASSERT_OK(err, "lseek")) + goto close; + + /* (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE | + * TFO_CLIENT_NO_COOKIE | TFO_SERVER_COOKIE_NOT_REQD) + */ + len = write(fd, "519", 3); + if (!ASSERT_EQ(len, 3, "write - setup")) + err = -1; + } + +close: + close(fd); + + return err; +} + +static int drop_ack(struct migrate_reuseport_test_case *test_case, + struct test_migrate_reuseport *skel) +{ + if 
(test_case->family == AF_INET) + skel->bss->server_port = ((struct sockaddr_in *) + &test_case->addr)->sin_port; + else + skel->bss->server_port = ((struct sockaddr_in6 *) + &test_case->addr)->sin6_port; + + test_case->link = bpf_program__attach_xdp(skel->progs.drop_ack, + IFINDEX_LO); + if (!ASSERT_OK_PTR(test_case->link, "bpf_program__attach_xdp")) + return -1; + + return 0; +} + +static int pass_ack(struct migrate_reuseport_test_case *test_case) +{ + int err; + + err = bpf_link__destroy(test_case->link); + if (!ASSERT_OK(err, "bpf_link__destroy")) + return -1; + + test_case->link = NULL; + + return 0; +} + +static int start_servers(struct migrate_reuseport_test_case *test_case, + struct test_migrate_reuseport *skel) +{ + int i, err, prog_fd, reuseport = 1, qlen = QLEN; + + prog_fd = bpf_program__fd(skel->progs.migrate_reuseport); + + make_sockaddr(test_case->family, + test_case->family == AF_INET ? "127.0.0.1" : "::1", 0, + &test_case->addr, &test_case->addrlen); + + for (i = 0; i < NR_SERVERS; i++) { + test_case->servers[i] = socket(test_case->family, SOCK_STREAM, + IPPROTO_TCP); + if (!ASSERT_NEQ(test_case->servers[i], -1, "socket")) + return -1; + + err = setsockopt(test_case->servers[i], SOL_SOCKET, + SO_REUSEPORT, &reuseport, sizeof(reuseport)); + if (!ASSERT_OK(err, "setsockopt - SO_REUSEPORT")) + return -1; + + err = bind(test_case->servers[i], + (struct sockaddr *)&test_case->addr, + test_case->addrlen); + if (!ASSERT_OK(err, "bind")) + return -1; + + if (i == 0) { + err = setsockopt(test_case->servers[i], SOL_SOCKET, + SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); + if (!ASSERT_OK(err, + "setsockopt - SO_ATTACH_REUSEPORT_EBPF")) + return -1; + + err = getsockname(test_case->servers[i], + (struct sockaddr *)&test_case->addr, + &test_case->addrlen); + if (!ASSERT_OK(err, "getsockname")) + return -1; + } + + if (test_case->fastopen) { + err = setsockopt(test_case->servers[i], + SOL_TCP, TCP_FASTOPEN, + &qlen, sizeof(qlen)); + if (!ASSERT_OK(err, "setsockopt - TCP_FASTOPEN")) + return -1; + } + + /* All requests will be tied to the first four listeners */ + if (i != MIGRATED_TO) { + err = listen(test_case->servers[i], qlen); + if (!ASSERT_OK(err, "listen")) + return -1; + } + } + + return 0; +} + +static int start_clients(struct migrate_reuseport_test_case *test_case) +{ + char buf[MSGLEN] = MSG; + int i, err; + + for (i = 0; i < NR_CLIENTS; i++) { + test_case->clients[i] = socket(test_case->family, SOCK_STREAM, + IPPROTO_TCP); + if (!ASSERT_NEQ(test_case->clients[i], -1, "socket")) + return -1; + + /* The attached XDP program drops only the final ACK, so + * clients will transition to TCP_ESTABLISHED immediately. 
+ */ + err = settimeo(test_case->clients[i], 100); + if (!ASSERT_OK(err, "settimeo")) + return -1; + + if (test_case->fastopen) { + int fastopen = 1; + + err = setsockopt(test_case->clients[i], IPPROTO_TCP, + TCP_FASTOPEN_CONNECT, &fastopen, + sizeof(fastopen)); + if (!ASSERT_OK(err, + "setsockopt - TCP_FASTOPEN_CONNECT")) + return -1; + } + + err = connect(test_case->clients[i], + (struct sockaddr *)&test_case->addr, + test_case->addrlen); + if (!ASSERT_OK(err, "connect")) + return -1; + + err = write(test_case->clients[i], buf, MSGLEN); + if (!ASSERT_EQ(err, MSGLEN, "write")) + return -1; + } + + return 0; +} + +static int update_maps(struct migrate_reuseport_test_case *test_case, + struct test_migrate_reuseport *skel) +{ + int i, err, migrated_to = MIGRATED_TO; + int reuseport_map_fd, migrate_map_fd; + __u64 value; + + reuseport_map_fd = bpf_map__fd(skel->maps.reuseport_map); + migrate_map_fd = bpf_map__fd(skel->maps.migrate_map); + + for (i = 0; i < NR_SERVERS; i++) { + value = (__u64)test_case->servers[i]; + err = bpf_map_update_elem(reuseport_map_fd, &i, &value, + BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem - reuseport_map")) + return -1; + + err = bpf_map_lookup_elem(reuseport_map_fd, &i, &value); + if (!ASSERT_OK(err, "bpf_map_lookup_elem - reuseport_map")) + return -1; + + err = bpf_map_update_elem(migrate_map_fd, &value, &migrated_to, + BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem - migrate_map")) + return -1; + } + + return 0; +} + +static int migrate_dance(struct migrate_reuseport_test_case *test_case) +{ + int i, err; + + /* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests + * to the last listener based on eBPF. + */ + for (i = 0; i < MIGRATED_TO; i++) { + err = shutdown(test_case->servers[i], SHUT_RDWR); + if (!ASSERT_OK(err, "shutdown")) + return -1; + } + + /* No dance for TCP_NEW_SYN_RECV to migrate based on eBPF */ + if (test_case->state == BPF_TCP_NEW_SYN_RECV) + return 0; + + /* Note that we use the second listener instead of the + * first one here. + * + * The fist listener is bind()ed with port 0 and, + * SOCK_BINDPORT_LOCK is not set to sk_userlocks, so + * calling listen() again will bind() the first listener + * on a new ephemeral port and detach it from the existing + * reuseport group. (See: __inet_bind(), tcp_set_state()) + * + * OTOH, the second one is bind()ed with a specific port, + * and SOCK_BINDPORT_LOCK is set. Thus, re-listen() will + * resurrect the listener on the existing reuseport group. + */ + err = listen(test_case->servers[1], QLEN); + if (!ASSERT_OK(err, "listen")) + return -1; + + /* Migrate from the last listener to the second one. + * + * All listeners were detached out of the reuseport_map, + * so migration will be done by kernel random pick from here. 
+ */ + err = shutdown(test_case->servers[MIGRATED_TO], SHUT_RDWR); + if (!ASSERT_OK(err, "shutdown")) + return -1; + + /* Back to the existing reuseport group */ + err = listen(test_case->servers[MIGRATED_TO], QLEN); + if (!ASSERT_OK(err, "listen")) + return -1; + + /* Migrate back to the last one from the second one */ + err = shutdown(test_case->servers[1], SHUT_RDWR); + if (!ASSERT_OK(err, "shutdown")) + return -1; + + return 0; +} + +static void count_requests(struct migrate_reuseport_test_case *test_case, + struct test_migrate_reuseport *skel) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int err, cnt = 0, client; + char buf[MSGLEN]; + + err = settimeo(test_case->servers[MIGRATED_TO], 4000); + if (!ASSERT_OK(err, "settimeo")) + goto out; + + for (; cnt < NR_CLIENTS; cnt++) { + client = accept(test_case->servers[MIGRATED_TO], + (struct sockaddr *)&addr, &len); + if (!ASSERT_NEQ(client, -1, "accept")) + goto out; + + memset(buf, 0, MSGLEN); + read(client, &buf, MSGLEN); + close(client); + + if (!ASSERT_STREQ(buf, MSG, "read")) + goto out; + } + +out: + ASSERT_EQ(cnt, NR_CLIENTS, "count in userspace"); + + switch (test_case->state) { + case BPF_TCP_ESTABLISHED: + cnt = skel->bss->migrated_at_close; + break; + case BPF_TCP_SYN_RECV: + cnt = skel->bss->migrated_at_close_fastopen; + break; + case BPF_TCP_NEW_SYN_RECV: + if (test_case->expire_synack_timer) + cnt = skel->bss->migrated_at_send_synack; + else + cnt = skel->bss->migrated_at_recv_ack; + break; + default: + cnt = 0; + } + + ASSERT_EQ(cnt, NR_CLIENTS, "count in BPF prog"); +} + +static void run_test(struct migrate_reuseport_test_case *test_case, + struct test_migrate_reuseport *skel) +{ + int err, saved_len; + char buf[16]; + + skel->bss->migrated_at_close = 0; + skel->bss->migrated_at_close_fastopen = 0; + skel->bss->migrated_at_send_synack = 0; + skel->bss->migrated_at_recv_ack = 0; + + init_fds(test_case->servers, NR_SERVERS); + init_fds(test_case->clients, NR_CLIENTS); + + if (test_case->fastopen) { + memset(buf, 0, sizeof(buf)); + + err = setup_fastopen(buf, sizeof(buf), &saved_len, false); + if (!ASSERT_OK(err, "setup_fastopen - setup")) + return; + } + + err = start_servers(test_case, skel); + if (!ASSERT_OK(err, "start_servers")) + goto close_servers; + + if (test_case->drop_ack) { + /* Drop the final ACK of the 3-way handshake and stick the + * in-flight requests on TCP_SYN_RECV or TCP_NEW_SYN_RECV. + */ + err = drop_ack(test_case, skel); + if (!ASSERT_OK(err, "drop_ack")) + goto close_servers; + } + + /* Tie requests to the first four listners */ + err = start_clients(test_case); + if (!ASSERT_OK(err, "start_clients")) + goto close_clients; + + err = listen(test_case->servers[MIGRATED_TO], QLEN); + if (!ASSERT_OK(err, "listen")) + goto close_clients; + + err = update_maps(test_case, skel); + if (!ASSERT_OK(err, "fill_maps")) + goto close_clients; + + /* Migrate the requests in the accept queue only. + * TCP_NEW_SYN_RECV requests are not migrated at this point. + */ + err = migrate_dance(test_case); + if (!ASSERT_OK(err, "migrate_dance")) + goto close_clients; + + if (test_case->expire_synack_timer) { + /* Wait for SYN+ACK timers to expire so that + * reqsk_timer_handler() migrates TCP_NEW_SYN_RECV requests. 
+ */ + sleep(1); + } + + if (test_case->link) { + /* Resume 3WHS and migrate TCP_NEW_SYN_RECV requests */ + err = pass_ack(test_case); + if (!ASSERT_OK(err, "pass_ack")) + goto close_clients; + } + + count_requests(test_case, skel); + +close_clients: + close_fds(test_case->clients, NR_CLIENTS); + + if (test_case->link) { + err = pass_ack(test_case); + ASSERT_OK(err, "pass_ack - clean up"); + } + +close_servers: + close_fds(test_case->servers, NR_SERVERS); + + if (test_case->fastopen) { + err = setup_fastopen(buf, sizeof(buf), &saved_len, true); + ASSERT_OK(err, "setup_fastopen - restore"); + } +} + +void serial_test_migrate_reuseport(void) +{ + struct test_migrate_reuseport *skel; + int i; + + skel = test_migrate_reuseport__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + test__start_subtest(test_cases[i].name); + run_test(&test_cases[i], skel); + } + + test_migrate_reuseport__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c new file mode 100644 index 000000000..37b002ca1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <sys/mman.h> +#include "test_mmap.skel.h" + +struct map_data { + __u64 val[512 * 4]; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +void test_mmap(void) +{ + const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss)); + const size_t map_sz = roundup_page(sizeof(struct map_data)); + const int zero = 0, one = 1, two = 2, far = 1500; + const long page_size = sysconf(_SC_PAGE_SIZE); + int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd; + struct bpf_map *data_map, *bss_map; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2; + struct test_mmap__bss *bss_data; + struct bpf_map_info map_info; + __u32 map_info_sz = sizeof(map_info); + struct map_data *map_data; + struct test_mmap *skel; + __u64 val = 0; + + skel = test_mmap__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size); + if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) + goto cleanup; + + /* at least 4 pages of data */ + err = bpf_map__set_max_entries(skel->maps.data_map, + 4 * (page_size / sizeof(u64))); + if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n")) + goto cleanup; + + err = test_mmap__load(skel); + if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) + goto cleanup; + + bss_map = skel->maps.bss; + data_map = skel->maps.data_map; + data_map_fd = bpf_map__fd(data_map); + + rdmap_fd = bpf_map__fd(skel->maps.rdonly_map); + tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0); + if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) { + munmap(tmp1, page_size); + goto cleanup; + } + /* now double-check if it's mmap()'able at all */ + tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0); + if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno)) + goto cleanup; + + /* get map's ID */ + memset(&map_info, 0, map_info_sz); + err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz); + if (CHECK(err, "map_get_info", "failed %d\n", errno)) + goto cleanup; + data_map_id = 
map_info.id; + + /* mmap BSS map */ + bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + bpf_map__fd(bss_map), 0); + if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap", + ".bss mmap failed: %d\n", errno)) { + bss_mmaped = NULL; + goto cleanup; + } + /* map as R/W first */ + map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + + bss_data = bss_mmaped; + map_data = map_mmaped; + + CHECK_FAIL(bss_data->in_val); + CHECK_FAIL(bss_data->out_val); + CHECK_FAIL(skel->bss->in_val); + CHECK_FAIL(skel->bss->out_val); + CHECK_FAIL(map_data->val[0]); + CHECK_FAIL(map_data->val[1]); + CHECK_FAIL(map_data->val[2]); + CHECK_FAIL(map_data->val[far]); + + err = test_mmap__attach(skel); + if (CHECK(err, "attach_raw_tp", "err %d\n", err)) + goto cleanup; + + bss_data->in_val = 123; + val = 111; + CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0)); + + usleep(1); + + CHECK_FAIL(bss_data->in_val != 123); + CHECK_FAIL(bss_data->out_val != 123); + CHECK_FAIL(skel->bss->in_val != 123); + CHECK_FAIL(skel->bss->out_val != 123); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 123); + CHECK_FAIL(map_data->val[far] != 3 * 123); + + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val)); + CHECK_FAIL(val != 111); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val)); + CHECK_FAIL(val != 222); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val)); + CHECK_FAIL(val != 123); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val)); + CHECK_FAIL(val != 3 * 123); + + /* data_map freeze should fail due to R/W mmap() */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(!err || errno != EBUSY, "no_freeze", + "data_map freeze succeeded: err=%d, errno=%d\n", err, errno)) + goto cleanup; + + err = mprotect(map_mmaped, map_sz, PROT_READ); + if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno)) + goto cleanup; + + /* unmap R/W mapping */ + err = munmap(map_mmaped, map_sz); + map_mmaped = NULL; + if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno)) + goto cleanup; + + /* re-map as R/O now */ + map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map R/O mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + err = mprotect(map_mmaped, map_sz, PROT_WRITE); + if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n")) + goto cleanup; + err = mprotect(map_mmaped, map_sz, PROT_EXEC); + if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n")) + goto cleanup; + map_data = map_mmaped; + + /* map/unmap in a loop to test ref counting */ + for (i = 0; i < 10; i++) { + int flags = i % 2 ? 
PROT_READ : PROT_WRITE; + void *p; + + p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0); + if (CHECK_FAIL(p == MAP_FAILED)) + goto cleanup; + err = munmap(p, map_sz); + if (CHECK_FAIL(err)) + goto cleanup; + } + + /* data_map freeze should now succeed due to no R/W mapping */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n", + err, errno)) + goto cleanup; + + /* mapping as R/W now should fail */ + tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) { + munmap(tmp1, map_sz); + goto cleanup; + } + + bss_data->in_val = 321; + usleep(1); + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(skel->bss->in_val != 321); + CHECK_FAIL(skel->bss->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + /* check some more advanced mmap() manipulations */ + + tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, + -1, 0); + if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno)) + goto cleanup; + + /* map all but last page: pages 1-3 mapped */ + tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, 0); + if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) { + munmap(tmp0, 4 * page_size); + goto cleanup; + } + + /* unmap second page: pages 1, 3 mapped */ + err = munmap(tmp1 + page_size, page_size); + if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { + munmap(tmp1, 4 * page_size); + goto cleanup; + } + + /* map page 2 back */ + tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, + MAP_SHARED | MAP_FIXED, data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { + munmap(tmp1, page_size); + munmap(tmp1 + 2*page_size, 2 * page_size); + goto cleanup; + } + CHECK(tmp1 + page_size != tmp2, "adv_mmap4", + "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + /* re-map all 4 pages */ + tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { + munmap(tmp1, 4 * page_size); /* unmap page 1 */ + goto cleanup; + } + CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + map_data = tmp2; + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(skel->bss->in_val != 321); + CHECK_FAIL(skel->bss->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + munmap(tmp2, 4 * page_size); + + /* map all 4 pages, but with pg_off=1 page, should fail */ + tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, page_size /* initial page shift */); + if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) { + munmap(tmp1, 4 * page_size); + goto cleanup; + } + + tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); + if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno)) + goto cleanup; + + test_mmap__destroy(skel); + skel = NULL; + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); + bss_mmaped = NULL; + CHECK_FAIL(munmap(map_mmaped, map_sz)); + map_mmaped = NULL; + + /* map should be still held by active mmap */ + tmp_fd = bpf_map_get_fd_by_id(data_map_id); + if (CHECK(tmp_fd < 0, 
"get_map_by_id", "failed %d\n", errno)) { + munmap(tmp1, map_sz); + goto cleanup; + } + close(tmp_fd); + + /* this should release data map finally */ + munmap(tmp1, map_sz); + + /* we need to wait for RCU grace period */ + for (i = 0; i < 10000; i++) { + __u32 id = data_map_id - 1; + if (bpf_map_get_next_id(id, &id) || id > data_map_id) + break; + usleep(1); + } + + /* should fail to get map FD by non-existing ID */ + tmp_fd = bpf_map_get_fd_by_id(data_map_id); + if (CHECK(tmp_fd >= 0, "get_map_by_id_after", + "unexpectedly succeeded %d\n", tmp_fd)) { + close(tmp_fd); + goto cleanup; + } + +cleanup: + if (bss_mmaped) + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); + if (map_mmaped) + CHECK_FAIL(munmap(map_mmaped, map_sz)); + test_mmap__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c new file mode 100644 index 000000000..5d9955af6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020 Google LLC. + */ + +#include <test_progs.h> +#include "modify_return.skel.h" + +#define LOWER(x) ((x) & 0xffff) +#define UPPER(x) ((x) >> 16) + + +static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret) +{ + struct modify_return *skel = NULL; + int err, prog_fd; + __u16 side_effect; + __s16 ret; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + skel = modify_return__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + err = modify_return__attach(skel); + if (!ASSERT_OK(err, "modify_return__attach failed")) + goto cleanup; + + skel->bss->input_retval = input_retval; + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + + side_effect = UPPER(topts.retval); + ret = LOWER(topts.retval); + + ASSERT_EQ(ret, want_ret, "test_run ret"); + ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect"); + ASSERT_EQ(skel->bss->fentry_result, 1, "modify_return fentry_result"); + ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result"); + ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result"); + +cleanup: + modify_return__destroy(skel); +} + +/* TODO: conflict with get_func_ip_test */ +void serial_test_modify_return(void) +{ + run_test(0 /* input_retval */, + 1 /* want_side_effect */, + 4 /* want_ret */); + run_test(-EINVAL /* input_retval */, + 0 /* want_side_effect */, + -EINVAL /* want_ret */); +} diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c new file mode 100644 index 000000000..6d0e50dcf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <stdbool.h> +#include "test_module_attach.skel.h" + +static int duration; + +static int trigger_module_test_writable(int *val) +{ + int fd, err; + char buf[65]; + ssize_t rd; + + fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY); + err = -errno; + if (!ASSERT_GE(fd, 0, "testmode_file_open")) + return err; + + rd = read(fd, buf, sizeof(buf) - 1); + err = -errno; + if (!ASSERT_GT(rd, 0, "testmod_file_rd_val")) { + close(fd); + return err; + } + + buf[rd] = '\0'; + *val = strtol(buf, NULL, 0); + close(fd); + + return 0; +} + +static int delete_module(const char *name, int flags) +{ + return syscall(__NR_delete_module, name, 
flags); +} + +void test_module_attach(void) +{ + const int READ_SZ = 456; + const int WRITE_SZ = 457; + struct test_module_attach* skel; + struct test_module_attach__bss *bss; + struct bpf_link *link; + int err; + int writable_val = 0; + + skel = test_module_attach__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + err = bpf_program__set_attach_target(skel->progs.handle_fentry_manual, + 0, "bpf_testmod_test_read"); + ASSERT_OK(err, "set_attach_target"); + + err = test_module_attach__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton\n")) + return; + + bss = skel->bss; + + err = test_module_attach__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + ASSERT_OK(trigger_module_test_read(READ_SZ), "trigger_read"); + ASSERT_OK(trigger_module_test_write(WRITE_SZ), "trigger_write"); + + ASSERT_EQ(bss->raw_tp_read_sz, READ_SZ, "raw_tp"); + ASSERT_EQ(bss->raw_tp_bare_write_sz, WRITE_SZ, "raw_tp_bare"); + ASSERT_EQ(bss->tp_btf_read_sz, READ_SZ, "tp_btf"); + ASSERT_EQ(bss->fentry_read_sz, READ_SZ, "fentry"); + ASSERT_EQ(bss->fentry_manual_read_sz, READ_SZ, "fentry_manual"); + ASSERT_EQ(bss->fexit_read_sz, READ_SZ, "fexit"); + ASSERT_EQ(bss->fexit_ret, -EIO, "fexit_tet"); + ASSERT_EQ(bss->fmod_ret_read_sz, READ_SZ, "fmod_ret"); + + bss->raw_tp_writable_bare_early_ret = true; + bss->raw_tp_writable_bare_out_val = 0xf1f2f3f4; + ASSERT_OK(trigger_module_test_writable(&writable_val), + "trigger_writable"); + ASSERT_EQ(bss->raw_tp_writable_bare_in_val, 1024, "writable_test_in"); + ASSERT_EQ(bss->raw_tp_writable_bare_out_val, writable_val, + "writable_test_out"); + + test_module_attach__detach(skel); + + /* attach fentry/fexit and make sure it get's module reference */ + link = bpf_program__attach(skel->progs.handle_fentry); + if (!ASSERT_OK_PTR(link, "attach_fentry")) + goto cleanup; + + ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + bpf_link__destroy(link); + + link = bpf_program__attach(skel->progs.handle_fexit); + if (!ASSERT_OK_PTR(link, "attach_fexit")) + goto cleanup; + + ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + bpf_link__destroy(link); + +cleanup: + test_module_attach__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c new file mode 100644 index 000000000..59f08d6d1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Tessares SA. */ +/* Copyright (c) 2022, SUSE. 
*/ + +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "mptcp_sock.skel.h" + +#ifndef TCP_CA_NAME_MAX +#define TCP_CA_NAME_MAX 16 +#endif + +struct mptcp_storage { + __u32 invoked; + __u32 is_mptcp; + struct sock *sk; + __u32 token; + struct sock *first; + char ca_name[TCP_CA_NAME_MAX]; +}; + +static int verify_tsk(int map_fd, int client_fd) +{ + int err, cfd = client_fd; + struct mptcp_storage val; + + err = bpf_map_lookup_elem(map_fd, &cfd, &val); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + return err; + + if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count")) + err++; + + if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp")) + err++; + + return err; +} + +static void get_msk_ca_name(char ca_name[]) +{ + size_t len; + int fd; + + fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY); + if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control")) + return; + + len = read(fd, ca_name, TCP_CA_NAME_MAX); + if (!ASSERT_GT(len, 0, "failed to read ca_name")) + goto err; + + if (len > 0 && ca_name[len - 1] == '\n') + ca_name[len - 1] = '\0'; + +err: + close(fd); +} + +static int verify_msk(int map_fd, int client_fd, __u32 token) +{ + char ca_name[TCP_CA_NAME_MAX]; + int err, cfd = client_fd; + struct mptcp_storage val; + + if (!ASSERT_GT(token, 0, "invalid token")) + return -1; + + get_msk_ca_name(ca_name); + + err = bpf_map_lookup_elem(map_fd, &cfd, &val); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + return err; + + if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count")) + err++; + + if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp")) + err++; + + if (!ASSERT_EQ(val.token, token, "unexpected token")) + err++; + + if (!ASSERT_EQ(val.first, val.sk, "unexpected first")) + err++; + + if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name")) + err++; + + return err; +} + +static int run_test(int cgroup_fd, int server_fd, bool is_mptcp) +{ + int client_fd, prog_fd, map_fd, err; + struct mptcp_sock *sock_skel; + + sock_skel = mptcp_sock__open_and_load(); + if (!ASSERT_OK_PTR(sock_skel, "skel_open_load")) + return -EIO; + + err = mptcp_sock__attach(sock_skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + prog_fd = bpf_program__fd(sock_skel->progs._sockops); + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) { + err = -EIO; + goto out; + } + + map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map); + if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) { + err = -EIO; + goto out; + } + + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + client_fd = connect_to_fd(server_fd, 0); + if (!ASSERT_GE(client_fd, 0, "connect to fd")) { + err = -EIO; + goto out; + } + + err += is_mptcp ? 
verify_msk(map_fd, client_fd, sock_skel->bss->token) : + verify_tsk(map_fd, client_fd); + + close(client_fd); + +out: + mptcp_sock__destroy(sock_skel); + return err; +} + +static void test_base(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/mptcp"); + if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) + return; + + /* without MPTCP */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto with_mptcp; + + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp"); + + close(server_fd); + +with_mptcp: + /* with MPTCP */ + server_fd = start_mptcp_server(AF_INET, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_mptcp_server")) + goto close_cgroup_fd; + + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp"); + + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} + +void test_mptcp(void) +{ + if (test__start_subtest("base")) + test_base(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c new file mode 100644 index 000000000..d3915c58d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/sysinfo.h> +#include <test_progs.h> +#include "network_helpers.h" +#include "netcnt_prog.skel.h" +#include "netcnt_common.h" + +#define CG_NAME "/netcnt" + +void serial_test_netcnt(void) +{ + union percpu_net_cnt *percpu_netcnt = NULL; + struct bpf_cgroup_storage_key key; + int map_fd, percpu_map_fd; + struct netcnt_prog *skel; + unsigned long packets; + union net_cnt netcnt; + unsigned long bytes; + int cpu, nproc; + int cg_fd = -1; + char cmd[128]; + + skel = netcnt_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load")) + return; + + nproc = bpf_num_possible_cpus(); + percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); + if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)")) + goto err; + + cg_fd = test__join_cgroup(CG_NAME); + if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup")) + goto err; + + skel->links.bpf_nextcnt = bpf_program__attach_cgroup(skel->progs.bpf_nextcnt, cg_fd); + if (!ASSERT_OK_PTR(skel->links.bpf_nextcnt, + "attach_cgroup(bpf_nextcnt)")) + goto err; + + snprintf(cmd, sizeof(cmd), "%s ::1 -A -c 10000 -q > /dev/null", ping_command(AF_INET6)); + ASSERT_OK(system(cmd), cmd); + + map_fd = bpf_map__fd(skel->maps.netcnt); + if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, &key), "bpf_map_get_next_key")) + goto err; + + if (!ASSERT_OK(bpf_map_lookup_elem(map_fd, &key, &netcnt), "bpf_map_lookup_elem(netcnt)")) + goto err; + + percpu_map_fd = bpf_map__fd(skel->maps.percpu_netcnt); + if (!ASSERT_OK(bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0]), + "bpf_map_lookup_elem(percpu_netcnt)")) + goto err; + + /* Some packets can be still in per-cpu cache, but not more than + * MAX_PERCPU_PACKETS. + */ + packets = netcnt.packets; + bytes = netcnt.bytes; + for (cpu = 0; cpu < nproc; cpu++) { + ASSERT_LE(percpu_netcnt[cpu].packets, MAX_PERCPU_PACKETS, "MAX_PERCPU_PACKETS"); + + packets += percpu_netcnt[cpu].packets; + bytes += percpu_netcnt[cpu].bytes; + } + + /* No packets should be lost */ + ASSERT_EQ(packets, 10000, "packets"); + + /* Let's check that bytes counter matches the number of packets + * multiplied by the size of ipv6 ICMP packet. 
+ */ + ASSERT_EQ(bytes, packets * 104, "bytes"); + +err: + if (cg_fd != -1) + close(cg_fd); + free(percpu_netcnt); + netcnt_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netns_cookie.c b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c new file mode 100644 index 000000000..71d8f3ba7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/netns_cookie.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "netns_cookie_prog.skel.h" +#include "network_helpers.h" + +#ifndef SO_NETNS_COOKIE +#define SO_NETNS_COOKIE 71 +#endif + +static int duration; + +void test_netns_cookie(void) +{ + int server_fd = -1, client_fd = -1, cgroup_fd = -1; + int err, val, ret, map, verdict; + struct netns_cookie_prog *skel; + uint64_t cookie_expected_value; + socklen_t vallen = sizeof(cookie_expected_value); + static const char send_msg[] = "message"; + + skel = netns_cookie_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + cgroup_fd = test__join_cgroup("/netns_cookie"); + if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n")) + goto done; + + skel->links.get_netns_cookie_sockops = bpf_program__attach_cgroup( + skel->progs.get_netns_cookie_sockops, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.get_netns_cookie_sockops, "prog_attach")) + goto done; + + verdict = bpf_program__fd(skel->progs.get_netns_cookie_sk_msg); + map = bpf_map__fd(skel->maps.sock_map); + err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "prog_attach")) + goto done; + + server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno)) + goto done; + + client_fd = connect_to_fd(server_fd, 0); + if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno)) + goto done; + + ret = send(client_fd, send_msg, sizeof(send_msg), 0); + if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n", ret)) + goto done; + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sockops_netns_cookies), + &client_fd, &val); + if (!ASSERT_OK(err, "map_lookup(sockops_netns_cookies)")) + goto done; + + err = getsockopt(client_fd, SOL_SOCKET, SO_NETNS_COOKIE, + &cookie_expected_value, &vallen); + if (!ASSERT_OK(err, "getsockopt")) + goto done; + + ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_msg_netns_cookies), + &client_fd, &val); + if (!ASSERT_OK(err, "map_lookup(sk_msg_netns_cookies)")) + goto done; + + ASSERT_EQ(val, cookie_expected_value, "cookie_value"); + +done: + if (server_fd != -1) + close(server_fd); + if (client_fd != -1) + close(client_fd); + if (cgroup_fd != -1) + close(cgroup_fd); + netns_cookie_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c new file mode 100644 index 000000000..24d493482 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */ + +#define _GNU_SOURCE +#include <test_progs.h> +#include "test_ns_current_pid_tgid.skel.h" +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sched.h> +#include <sys/wait.h> +#include <sys/mount.h> +#include <sys/fcntl.h> + +#define STACK_SIZE (1024 * 1024) +static char child_stack[STACK_SIZE]; + +static int test_current_pid_tgid(void *args) 
+{ + struct test_ns_current_pid_tgid__bss *bss; + struct test_ns_current_pid_tgid *skel; + int err = -1, duration = 0; + pid_t tgid, pid; + struct stat st; + + skel = test_ns_current_pid_tgid__open_and_load(); + if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n")) + goto cleanup; + + pid = syscall(SYS_gettid); + tgid = getpid(); + + err = stat("/proc/self/ns/pid", &st); + if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err)) + goto cleanup; + + bss = skel->bss; + bss->dev = st.st_dev; + bss->ino = st.st_ino; + bss->user_pid = 0; + bss->user_tgid = 0; + + err = test_ns_current_pid_tgid__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + ASSERT_EQ(bss->user_pid, pid, "pid"); + ASSERT_EQ(bss->user_tgid, tgid, "tgid"); + err = 0; + +cleanup: + test_ns_current_pid_tgid__destroy(skel); + + return err; +} + +static void test_ns_current_pid_tgid_new_ns(void) +{ + int wstatus, duration = 0; + pid_t cpid; + + /* Create a process in a new namespace, this process + * will be the init process of this new namespace hence will be pid 1. + */ + cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE, + CLONE_NEWPID | SIGCHLD, NULL); + + if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno))) + return; + + if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno))) + return; + + if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed")) + return; +} + +/* TODO: use a different tracepoint */ +void serial_test_ns_current_pid_tgid(void) +{ + if (test__start_subtest("ns_current_pid_tgid_root_ns")) + test_current_pid_tgid(NULL); + if (test__start_subtest("ns_current_pid_tgid_new_ns")) + test_ns_current_pid_tgid_new_ns(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c new file mode 100644 index 000000000..7093edca6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_obj_name(void) +{ + struct { + const char *name; + int success; + int expected_errno; + } tests[] = { + { "", 1, 0 }, + { "_123456789ABCDE", 1, 0 }, + { "_123456789ABCDEF", 0, EINVAL }, + { "_123456789ABCD\n", 0, EINVAL }, + }; + struct bpf_insn prog[] = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + __u32 duration = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + size_t name_len = strlen(tests[i].name) + 1; + union bpf_attr attr; + size_t ncopy; + int fd; + + /* test different attr.prog_name during BPF_PROG_LOAD */ + ncopy = name_len < sizeof(attr.prog_name) ? + name_len : sizeof(attr.prog_name); + bzero(&attr, sizeof(attr)); + attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; + attr.insn_cnt = 2; + attr.insns = ptr_to_u64(prog); + attr.license = ptr_to_u64(""); + memcpy(attr.prog_name, tests[i].name, ncopy); + + fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd >= 0) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-prog-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd >= 0) + close(fd); + + /* test different attr.map_name during BPF_MAP_CREATE */ + ncopy = name_len < sizeof(attr.map_name) ? 
+ name_len : sizeof(attr.map_name); + bzero(&attr, sizeof(attr)); + attr.map_type = BPF_MAP_TYPE_ARRAY; + attr.key_size = 4; + attr.value_size = 4; + attr.max_entries = 1; + attr.map_flags = 0; + memcpy(attr.map_name, tests[i].name, ncopy); + fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)); + CHECK((tests[i].success && fd < 0) || + (!tests[i].success && fd >= 0) || + (!tests[i].success && errno != tests[i].expected_errno), + "check-bpf-map-name", + "fd %d(%d) errno %d(%d)\n", + fd, tests[i].success, errno, tests[i].expected_errno); + + if (fd >= 0) + close(fd); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c b/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c new file mode 100644 index 000000000..673d38395 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include <linux/bpf.h> +#include "test_pe_preserve_elems.skel.h" + +static int duration; + +static void test_one_map(struct bpf_map *map, struct bpf_program *prog, + bool has_share_pe) +{ + int err, key = 0, pfd = -1, mfd = bpf_map__fd(map); + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts); + struct perf_event_attr attr = { + .size = sizeof(struct perf_event_attr), + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + }; + + pfd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, + -1 /* cpu 0 */, -1 /* group id */, 0 /* flags */); + if (CHECK(pfd < 0, "perf_event_open", "failed\n")) + return; + + err = bpf_map_update_elem(mfd, &key, &pfd, BPF_ANY); + close(pfd); + if (CHECK(err < 0, "bpf_map_update_elem", "failed\n")) + return; + + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts); + if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n")) + return; + if (CHECK(opts.retval != 0, "bpf_perf_event_read_value", + "failed with %d\n", opts.retval)) + return; + + /* closing mfd, prog still holds a reference on map */ + close(mfd); + + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts); + if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n")) + return; + + if (has_share_pe) { + CHECK(opts.retval != 0, "bpf_perf_event_read_value", + "failed with %d\n", opts.retval); + } else { + CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value", + "should have failed with %d, but got %d\n", -ENOENT, + opts.retval); + } +} + +void test_pe_preserve_elems(void) +{ + struct test_pe_preserve_elems *skel; + + skel = test_pe_preserve_elems__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + test_one_map(skel->maps.array_1, skel->progs.read_array_1, false); + test_one_map(skel->maps.array_2, skel->progs.read_array_2, true); + + test_pe_preserve_elems__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c new file mode 100644 index 000000000..bc24f8333 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/socket.h> +#include <test_progs.h> +#include "bpf/libbpf_internal.h" +#include "test_perf_branches.skel.h" + +static void check_good_sample(struct test_perf_branches *skel) +{ + int written_global = skel->bss->written_global_out; + int required_size = skel->bss->required_size_out; + int written_stack = skel->bss->written_stack_out; + int pbe_size = 
sizeof(struct perf_branch_entry); + int duration = 0; + + if (CHECK(!skel->bss->valid, "output not valid", + "no valid sample from prog")) + return; + + /* + * It's hard to validate the contents of the branch entries b/c it + * would require some kind of disassembler and also encoding the + * valid jump instructions for supported architectures. So just check + * the easy stuff for now. + */ + CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size); + CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack); + CHECK(written_stack % pbe_size != 0, "read_branches_stack", + "stack bytes written=%d not multiple of struct size=%d\n", + written_stack, pbe_size); + CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global); + CHECK(written_global % pbe_size != 0, "read_branches_global", + "global bytes written=%d not multiple of struct size=%d\n", + written_global, pbe_size); + CHECK(written_global < written_stack, "read_branches_size", + "written_global=%d < written_stack=%d\n", written_global, written_stack); +} + +static void check_bad_sample(struct test_perf_branches *skel) +{ + int written_global = skel->bss->written_global_out; + int required_size = skel->bss->required_size_out; + int written_stack = skel->bss->written_stack_out; + int duration = 0; + + if (CHECK(!skel->bss->valid, "output not valid", + "no valid sample from prog")) + return; + + CHECK((required_size != -EINVAL && required_size != -ENOENT), + "read_branches_size", "err %d\n", required_size); + CHECK((written_stack != -EINVAL && written_stack != -ENOENT), + "read_branches_stack", "written %d\n", written_stack); + CHECK((written_global != -EINVAL && written_global != -ENOENT), + "read_branches_global", "written %d\n", written_global); +} + +static void test_perf_branches_common(int perf_fd, + void (*cb)(struct test_perf_branches *)) +{ + struct test_perf_branches *skel; + int err, i, duration = 0; + bool detached = false; + struct bpf_link *link; + volatile int j = 0; + cpu_set_t cpu_set; + + skel = test_perf_branches__open_and_load(); + if (CHECK(!skel, "test_perf_branches_load", + "perf_branches skeleton failed\n")) + return; + + /* attach perf_event */ + link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd); + if (!ASSERT_OK_PTR(link, "attach_perf_event")) + goto out_destroy_skel; + + /* generate some branches on cpu 0 */ + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err)) + goto out_destroy; + /* spin the loop for a while (random high number) */ + for (i = 0; i < 1000000; ++i) + ++j; + + test_perf_branches__detach(skel); + detached = true; + + cb(skel); +out_destroy: + bpf_link__destroy(link); +out_destroy_skel: + if (!detached) + test_perf_branches__detach(skel); + test_perf_branches__destroy(skel); +} + +static void test_perf_branches_hw(void) +{ + struct perf_event_attr attr = {0}; + int duration = 0; + int pfd; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_HARDWARE; + attr.config = PERF_COUNT_HW_CPU_CYCLES; + attr.freq = 1; + attr.sample_freq = 1000; + attr.sample_type = PERF_SAMPLE_BRANCH_STACK; + attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; + pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + + /* + * Some setups don't support branch records (virtual machines, !x86), + * so skip test in this case. 
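+ * perf_event_open() reports the missing support as ENOENT or EOPNOTSUPP, + * which is what the errno check below tests for.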
+ */ + if (pfd < 0) { + if (errno == ENOENT || errno == EOPNOTSUPP) { + printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n", + __func__); + test__skip(); + return; + } + if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n", + pfd, errno)) + return; + } + + test_perf_branches_common(pfd, check_good_sample); + + close(pfd); +} + +/* + * Tests negative case -- run bpf_read_branch_records() on improperly configured + * perf event. + */ +static void test_perf_branches_no_hw(void) +{ + struct perf_event_attr attr = {0}; + int duration = 0; + int pfd; + + /* create perf event */ + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd)) + return; + + test_perf_branches_common(pfd, check_bad_sample); + + close(pfd); +} + +void test_perf_branches(void) +{ + if (test__start_subtest("perf_branches_hw")) + test_perf_branches_hw(); + if (test__start_subtest("perf_branches_no_hw")) + test_perf_branches_no_hw(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c new file mode 100644 index 000000000..5fc2b3a07 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <sys/socket.h> +#include <test_progs.h> +#include "test_perf_buffer.skel.h" +#include "bpf/libbpf_internal.h" + +static int duration; + +/* AddressSanitizer sometimes crashes due to data dereference below, due to + * this being mmap()'ed memory. Disable instrumentation with + * no_sanitize_address attribute + */ +__attribute__((no_sanitize_address)) +static void on_sample(void *ctx, int cpu, void *data, __u32 size) +{ + int cpu_data = *(int *)data, duration = 0; + cpu_set_t *cpu_seen = ctx; + + if (cpu_data != cpu) + CHECK(cpu_data != cpu, "check_cpu_data", + "cpu_data %d != cpu %d\n", cpu_data, cpu); + + CPU_SET(cpu, cpu_seen); +} + +int trigger_on_cpu(int cpu) +{ + cpu_set_t cpu_set; + int err; + + CPU_ZERO(&cpu_set); + CPU_SET(cpu, &cpu_set); + + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err)) + return err; + + usleep(1); + + return 0; +} + +void serial_test_perf_buffer(void) +{ + int err, on_len, nr_on_cpus = 0, nr_cpus, i, j; + int zero = 0, my_pid = getpid(); + struct test_perf_buffer *skel; + cpu_set_t cpu_seen; + struct perf_buffer *pb; + int last_fd = -1, fd; + bool *online; + + nr_cpus = libbpf_num_possible_cpus(); + if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus)) + return; + + err = parse_cpu_mask_file("/sys/devices/system/cpu/online", + &online, &on_len); + if (CHECK(err, "nr_on_cpus", "err %d\n", err)) + return; + + for (i = 0; i < on_len; i++) + if (online[i]) + nr_on_cpus++; + + /* load program */ + skel = test_perf_buffer__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + goto out_close; + + err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0); + if (!ASSERT_OK(err, "my_pid_update")) + goto out_close; + + /* attach probe */ + err = test_perf_buffer__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) + goto out_close; + + /* set up perf buffer */ + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, + 
on_sample, NULL, &cpu_seen, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buf__new")) + goto out_close; + + CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd", + "bad fd: %d\n", perf_buffer__epoll_fd(pb)); + + /* trigger kprobe on every CPU */ + CPU_ZERO(&cpu_seen); + for (i = 0; i < nr_cpus; i++) { + if (i >= on_len || !online[i]) { + printf("skipping offline CPU #%d\n", i); + continue; + } + + if (trigger_on_cpu(i)) + goto out_close; + } + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) + goto out_free_pb; + + if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt", + "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen))) + goto out_free_pb; + + if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt", + "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus)) + goto out_close; + + for (i = 0, j = 0; i < nr_cpus; i++) { + if (i >= on_len || !online[i]) + continue; + + fd = perf_buffer__buffer_fd(pb, j); + CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd); + last_fd = fd; + + err = perf_buffer__consume_buffer(pb, j); + if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err)) + goto out_close; + + CPU_CLR(i, &cpu_seen); + if (trigger_on_cpu(i)) + goto out_close; + + err = perf_buffer__consume_buffer(pb, j); + if (CHECK(err, "consume_buf", "cpu %d, err %d\n", j, err)) + goto out_close; + + if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i)) + goto out_close; + j++; + } + +out_free_pb: + perf_buffer__free(pb); +out_close: + test_perf_buffer__destroy(skel); + free(online); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c new file mode 100644 index 000000000..f4aad35af --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <test_progs.h> +#include "perf_event_stackmap.skel.h" + +#ifndef noinline +#define noinline __attribute__((noinline)) +#endif + +noinline int func_1(void) +{ + static int val = 1; + + val += 1; + + usleep(100); + return val; +} + +noinline int func_2(void) +{ + return func_1(); +} + +noinline int func_3(void) +{ + return func_2(); +} + +noinline int func_4(void) +{ + return func_3(); +} + +noinline int func_5(void) +{ + return func_4(); +} + +noinline int func_6(void) +{ + int i, val = 1; + + for (i = 0; i < 100; i++) + val += func_5(); + + return val; +} + +void test_perf_event_stackmap(void) +{ + struct perf_event_attr attr = { + /* .type = PERF_TYPE_SOFTWARE, */ + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .precise_ip = 2, + .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK | + PERF_SAMPLE_CALLCHAIN, + .branch_sample_type = PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_NO_FLAGS | + PERF_SAMPLE_BRANCH_NO_CYCLES | + PERF_SAMPLE_BRANCH_CALL_STACK, + .freq = 1, + .sample_freq = read_perf_max_sample_freq(), + .size = sizeof(struct perf_event_attr), + }; + struct perf_event_stackmap *skel; + __u32 duration = 0; + cpu_set_t cpu_set; + int pmu_fd, err; + + skel = perf_event_stackmap__open(); + + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + err = perf_event_stackmap__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; + + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = 
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno)) + goto cleanup; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (pmu_fd < 0) { + printf("%s:SKIP:cpu doesn't support the event\n", __func__); + test__skip(); + goto cleanup; + } + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) { + close(pmu_fd); + goto cleanup; + } + + /* create kernel and user stack traces for testing */ + func_6(); + + CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n"); + CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n"); + CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n"); + CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n"); + +cleanup: + perf_event_stackmap__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c new file mode 100644 index 000000000..224eba6fe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#define _GNU_SOURCE +#include <pthread.h> +#include <sched.h> +#include <test_progs.h> +#include "test_perf_link.skel.h" + +static void burn_cpu(void) +{ + volatile int j = 0; + cpu_set_t cpu_set; + int i, err; + + /* generate some branches on cpu 0 */ + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + ASSERT_OK(err, "set_thread_affinity"); + + /* spin the loop for a while (random high number) */ + for (i = 0; i < 1000000; ++i) + ++j; +} + +/* TODO: often fails in concurrent mode */ +void serial_test_perf_link(void) +{ + struct test_perf_link *skel = NULL; + struct perf_event_attr attr; + int pfd = -1, link_fd = -1, err; + int run_cnt_before, run_cnt_after; + struct bpf_link_info info; + __u32 info_len = sizeof(info); + + /* create perf event */ + memset(&attr, 0, sizeof(attr)); + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (!ASSERT_GE(pfd, 0, "perf_fd")) + goto cleanup; + + skel = test_perf_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd, + BPF_PERF_EVENT, NULL); + if (!ASSERT_GE(link_fd, 0, "link_fd")) + goto cleanup; + + memset(&info, 0, sizeof(info)); + err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); + if (!ASSERT_OK(err, "link_get_info")) + goto cleanup; + + ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"); + ASSERT_GT(info.id, 0, "link_id"); + ASSERT_GT(info.prog_id, 0, "link_prog_id"); + + /* ensure we get at least one perf_event prog execution */ + burn_cpu(); + ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt"); + + /* perf_event is still active, but we close link and BPF program + * shouldn't be executed anymore + */ + close(link_fd); + link_fd = -1; + + /* make sure there are no stragglers */ + kern_sync_rcu(); + + run_cnt_before = skel->bss->run_cnt; + burn_cpu(); + run_cnt_after = skel->bss->run_cnt; + + ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_before_after"); + +cleanup: + if (link_fd >= 0) + close(link_fd); + if 
(pfd >= 0) + close(pfd); + test_perf_link__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c new file mode 100644 index 000000000..d95cee586 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <test_progs.h> + +__u32 get_map_id(struct bpf_object *obj, const char *name) +{ + struct bpf_map_info map_info = {}; + __u32 map_info_len, duration = 0; + struct bpf_map *map; + int err; + + map_info_len = sizeof(map_info); + + map = bpf_object__find_map_by_name(obj, name); + if (CHECK(!map, "find map", "NULL map")) + return 0; + + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), + &map_info, &map_info_len); + CHECK(err, "get map info", "err %d errno %d", err, errno); + return map_info.id; +} + +void test_pinning(void) +{ + const char *file_invalid = "./test_pinning_invalid.bpf.o"; + const char *custpinpath = "/sys/fs/bpf/custom/pinmap"; + const char *nopinpath = "/sys/fs/bpf/nopinmap"; + const char *nopinpath2 = "/sys/fs/bpf/nopinmap2"; + const char *custpath = "/sys/fs/bpf/custom"; + const char *pinpath = "/sys/fs/bpf/pinmap"; + const char *file = "./test_pinning.bpf.o"; + __u32 map_id, map_id2, duration = 0; + struct stat statbuf = {}; + struct bpf_object *obj; + struct bpf_map *map; + int err, map_fd; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .pin_root_path = custpath, + ); + + /* check that opening fails with invalid pinning value in map def */ + obj = bpf_object__open_file(file_invalid, NULL); + err = libbpf_get_error(obj); + if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* open the valid object file */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was *not* pinned */ + err = stat(nopinpath, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath", + "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap2 was *not* pinned */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + map_id = get_map_id(obj, "pinmap"); + if (!map_id) + goto out; + + bpf_object__close(obj); + + obj = bpf_object__open_file(file, NULL); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that same map ID was reused for second load */ + map_id2 = get_map_id(obj, "pinmap"); + if (CHECK(map_id != map_id2, "check reuse", + "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2)) + goto out; + + /* should be no-op to re-pin same map */ + map = bpf_object__find_map_by_name(obj, "pinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__pin(map, NULL); + if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno)) + goto out; + + /* but error to pin at different location */ + err = bpf_map__pin(map, "/sys/fs/bpf/other"); + if 
(CHECK(!err, "pin map different", "err %d errno %d\n", err, errno)) + goto out; + + /* unpin maps with a pin_path set */ + err = bpf_object__unpin_maps(obj, NULL); + if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* and re-pin them... */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* get pinning path */ + if (!ASSERT_STREQ(bpf_map__pin_path(map), pinpath, "get pin path")) + goto out; + + /* set pinning path of other map and re-pin all */ + map = bpf_object__find_map_by_name(obj, "nopinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__set_pin_path(map, custpinpath); + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + + /* get pinning path after set */ + if (!ASSERT_STREQ(bpf_map__pin_path(map), custpinpath, + "get pin path after set")) + goto out; + + /* should only pin the one unpinned map */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* remove the custom pin path to re-test it with auto-pinning below */ + err = unlink(custpinpath); + if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + err = rmdir(custpath); + if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* open the valid object file again */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* set pin paths so that nopinmap2 will attempt to reuse the map at + * pinpath (which will fail), but not before pinmap has already been + * reused + */ + bpf_object__for_each_map(map, obj) { + if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, nopinpath2); + else if (!strcmp(bpf_map__name(map), "nopinmap2")) + err = bpf_map__set_pin_path(map, pinpath); + else + continue; + + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + } + + /* should fail because of map parameter mismatch */ + err = bpf_object__load(obj); + if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) + goto out; + + /* nopinmap2 should have been pinned and cleaned up again */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + /* pinmap should still be there */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* test auto-pinning at custom path with open opt */ + obj = bpf_object__open_file(file, &opts); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "custom load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* remove the custom pin path to re-test it with reuse fd below */ + err = unlink(custpinpath); + if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + err = rmdir(custpath); + if 
(CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* test pinning at custom path with reuse fd */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(__u32), + sizeof(__u64), 1, NULL); + if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd)) + goto out; + + map = bpf_object__find_map_by_name(obj, "pinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto close_map_fd; + + err = bpf_map__reuse_fd(map, map_fd); + if (CHECK(err, "reuse pinmap fd", "err %d errno %d\n", err, errno)) + goto close_map_fd; + + err = bpf_map__set_pin_path(map, custpinpath); + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto close_map_fd; + + err = bpf_object__load(obj); + if (CHECK(err, "custom load", "err %d errno %d\n", err, errno)) + goto close_map_fd; + + /* check that pinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto close_map_fd; + +close_map_fd: + close(map_fd); +out: + unlink(pinpath); + unlink(nopinpath); + unlink(nopinpath2); + unlink(custpinpath); + rmdir(custpath); + if (obj) + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c new file mode 100644 index 000000000..682e4ff45 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void test_pkt_access(void) +{ + const char *file = "./test_pkt_access.bpf.o"; + struct bpf_object *obj; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 100000, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv4 test_run_opts err"); + ASSERT_OK(topts.retval, "ipv4 test_run_opts retval"); + + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = 0; /* reset from last call */ + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6 test_run_opts err"); + ASSERT_OK(topts.retval, "ipv6 test_run_opts retval"); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c new file mode 100644 index 000000000..0d85e0642 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void test_pkt_md_access(void) +{ + const char *file = "./test_pkt_md_access.bpf.o"; + struct bpf_object *obj; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run_opts err"); + ASSERT_OK(topts.retval, "test_run_opts retval"); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/probe_read_user_str.c 
b/tools/testing/selftests/bpf/prog_tests/probe_read_user_str.c new file mode 100644 index 000000000..e41929813 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/probe_read_user_str.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_probe_read_user_str.skel.h" + +static const char str1[] = "mestring"; +static const char str2[] = "mestringalittlebigger"; +static const char str3[] = "mestringblubblubblubblubblub"; + +static int test_one_str(struct test_probe_read_user_str *skel, const char *str, + size_t len) +{ + int err, duration = 0; + char buf[256]; + + /* Ensure bytes after string are ones */ + memset(buf, 1, sizeof(buf)); + memcpy(buf, str, len); + + /* Give prog our userspace pointer */ + skel->bss->user_ptr = buf; + + /* Trigger tracepoint */ + usleep(1); + + /* Did helper fail? */ + if (CHECK(skel->bss->ret < 0, "prog_ret", "prog returned: %ld\n", + skel->bss->ret)) + return 1; + + /* Check that string was copied correctly */ + err = memcmp(skel->bss->buf, str, len); + if (CHECK(err, "memcmp", "prog copied wrong string")) + return 1; + + /* Now check that no extra trailing bytes were copied */ + memset(buf, 0, sizeof(buf)); + err = memcmp(skel->bss->buf + len, buf, sizeof(buf) - len); + if (CHECK(err, "memcmp", "trailing bytes were not stripped")) + return 1; + + return 0; +} + +void test_probe_read_user_str(void) +{ + struct test_probe_read_user_str *skel; + int err, duration = 0; + + skel = test_probe_read_user_str__open_and_load(); + if (CHECK(!skel, "test_probe_read_user_str__open_and_load", + "skeleton open and load failed\n")) + return; + + /* Give pid to bpf prog so it doesn't read from anyone else */ + skel->bss->pid = getpid(); + + err = test_probe_read_user_str__attach(skel); + if (CHECK(err, "test_probe_read_user_str__attach", + "skeleton attach failed: %d\n", err)) + goto out; + + if (test_one_str(skel, str1, sizeof(str1))) + goto out; + if (test_one_str(skel, str2, sizeof(str2))) + goto out; + if (test_one_str(skel, str3, sizeof(str3))) + goto out; + +out: + test_probe_read_user_str__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c new file mode 100644 index 000000000..872167132 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +/* TODO: corrupts other tests uses connect() */ +void serial_test_probe_user(void) +{ + static const char *const prog_names[] = { + "handle_sys_connect", +#if defined(__s390x__) + "handle_sys_socketcall", +#endif + }; + enum { prog_count = ARRAY_SIZE(prog_names) }; + const char *obj_file = "./test_probe_user.bpf.o"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); + int err, results_map_fd, sock_fd, duration = 0; + struct sockaddr curr, orig, tmp; + struct sockaddr_in *in = (struct sockaddr_in *)&curr; + struct bpf_link *kprobe_links[prog_count] = {}; + struct bpf_program *kprobe_progs[prog_count]; + struct bpf_object *obj; + static const int zero = 0; + size_t i; + + obj = bpf_object__open_file(obj_file, &opts); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + return; + + for (i = 0; i < prog_count; i++) { + kprobe_progs[i] = + bpf_object__find_program_by_name(obj, prog_names[i]); + if (CHECK(!kprobe_progs[i], "find_probe", + "prog '%s' not found\n", prog_names[i])) + goto cleanup; + } + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + results_map_fd = 
bpf_find_map(__func__, obj, "test_pro.bss"); + if (CHECK(results_map_fd < 0, "find_bss_map", + "err %d\n", results_map_fd)) + goto cleanup; + + for (i = 0; i < prog_count; i++) { + kprobe_links[i] = bpf_program__attach(kprobe_progs[i]); + if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe")) + goto cleanup; + } + + memset(&curr, 0, sizeof(curr)); + in->sin_family = AF_INET; + in->sin_port = htons(5555); + in->sin_addr.s_addr = inet_addr("255.255.255.255"); + memcpy(&orig, &curr, sizeof(curr)); + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd)) + goto cleanup; + + connect(sock_fd, &curr, sizeof(curr)); + close(sock_fd); + + err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp); + if (CHECK(err, "get_kprobe_res", + "failed to get kprobe res: %d\n", err)) + goto cleanup; + + in = (struct sockaddr_in *)&tmp; + if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res", + "wrong kprobe res from probe read: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; + + memset(&tmp, 0xab, sizeof(tmp)); + + in = (struct sockaddr_in *)&curr; + if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res", + "wrong kprobe res from probe write: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; +cleanup: + for (i = 0; i < prog_count; i++) + bpf_link__destroy(kprobe_links[i]); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_array_init.c b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c new file mode 100644 index 000000000..fc4657619 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_array_init.c @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include "test_prog_array_init.skel.h" + +void test_prog_array_init(void) +{ + struct test_prog_array_init *skel; + int err; + + skel = test_prog_array_init__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_prog_array_init__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + skel->links.entry = bpf_program__attach_raw_tracepoint(skel->progs.entry, "sys_enter"); + if (!ASSERT_OK_PTR(skel->links.entry, "could not attach BPF program")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->value, 42, "unexpected value"); + +cleanup: + test_prog_array_init__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c new file mode 100644 index 000000000..1ccd2bdf8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_pkt_access.skel.h" + +static const __u32 duration; + +static void check_run_cnt(int prog_fd, __u64 run_cnt) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int err; + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) + return; + + CHECK(run_cnt != info.run_cnt, "run_cnt", + "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); +} + +void test_prog_run_opts(void) +{ + struct test_pkt_access *skel; + int err, stats_fd = -1, prog_fd; + char buf[10] = {}; + __u64 run_cnt = 0; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .repeat = 1, + 
.data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = 5, + ); + + stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); + if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd")) + return; + + skel = test_pkt_access__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.test_pkt_access); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_EQ(errno, ENOSPC, "test_run errno"); + ASSERT_ERR(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + + ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out"); + ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint"); + + run_cnt += topts.repeat; + check_run_cnt(prog_fd, run_cnt); + + topts.data_out = NULL; + topts.data_size_out = 0; + topts.repeat = 2; + errno = 0; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(errno, "run_no_output errno"); + ASSERT_OK(err, "run_no_output err"); + ASSERT_OK(topts.retval, "run_no_output retval"); + + run_cnt += topts.repeat; + check_run_cnt(prog_fd, run_cnt); + +cleanup: + if (skel) + test_pkt_access__destroy(skel); + if (stats_fd >= 0) + close(stats_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c new file mode 100644 index 000000000..14f279607 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void clear_test_state(struct test_state *state) +{ + state->error_cnt = 0; + state->sub_succ_cnt = 0; + state->skip_cnt = 0; +} + +void test_prog_tests_framework(void) +{ + struct test_state *state = env.test_state; + + /* in all the ASSERT calls below we need to return on the first + * error due to the fact that we are cleaning the test state after + * each dummy subtest + */ + + /* test we properly count skipped tests with subtests */ + if (test__start_subtest("test_good_subtest")) + test__end_subtest(); + if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_fail_subtest")) { + test__fail(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check")) + return; + clear_test_state(state); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c new file mode 100644 index 000000000..722c5f2a7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +enum { + QUEUE, + STACK, +}; + +static void test_queue_stack_map_by_type(int type) +{ + const int MAP_SIZE = 32; + __u32 vals[MAP_SIZE], val; + int i, err, prog_fd, map_in_fd, map_out_fd; + char file[32], 
buf[128]; + struct bpf_object *obj; + struct iphdr iph; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + /* Fill test values to be used */ + for (i = 0; i < MAP_SIZE; i++) + vals[i] = rand(); + + if (type == QUEUE) + strncpy(file, "./test_queue_map.bpf.o", sizeof(file)); + else if (type == STACK) + strncpy(file, "./test_stack_map.bpf.o", sizeof(file)); + else + return; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + map_in_fd = bpf_find_map(__func__, obj, "map_in"); + if (map_in_fd < 0) + goto out; + + map_out_fd = bpf_find_map(__func__, obj, "map_out"); + if (map_out_fd < 0) + goto out; + + /* Push 32 elements to the input map */ + for (i = 0; i < MAP_SIZE; i++) { + err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0); + if (CHECK_FAIL(err)) + goto out; + } + + /* The eBPF program pushes iph.saddr in the output map, + * pops the input map and saves this value in iph.daddr + */ + for (i = 0; i < MAP_SIZE; i++) { + if (type == QUEUE) { + val = vals[i]; + pkt_v4.iph.saddr = vals[i] * 5; + } else if (type == STACK) { + val = vals[MAP_SIZE - 1 - i]; + pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5; + } + + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (err || topts.retval || + topts.data_size_out != sizeof(pkt_v4)) + break; + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); + if (iph.daddr != val) + break; + } + + ASSERT_OK(err, "bpf_map_pop_elem"); + ASSERT_OK(topts.retval, "bpf_map_pop_elem test retval"); + ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), + "bpf_map_pop_elem data_size_out"); + ASSERT_EQ(iph.daddr, val, "bpf_map_pop_elem iph.daddr"); + + /* Queue is empty, program should return TC_ACT_SHOT */ + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "check-queue-stack-map-empty"); + ASSERT_EQ(topts.retval, 2 /* TC_ACT_SHOT */, + "check-queue-stack-map-empty test retval"); + ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), + "check-queue-stack-map-empty data_size_out"); + + /* Check that the program pushed elements correctly */ + for (i = 0; i < MAP_SIZE; i++) { + err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val); + ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"); + ASSERT_EQ(val, vals[i] * 5, "bpf_map_push_elem val"); + } +out: + pkt_v4.iph.saddr = 0; + bpf_object__close(obj); +} + +void test_queue_stack_map(void) +{ + test_queue_stack_map_by_type(QUEUE); + test_queue_stack_map_by_type(STACK); +} diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c new file mode 100644 index 000000000..fe5b8fae2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> +#include <linux/bpf.h> +#include "bpf/libbpf_internal.h" +#include "test_raw_tp_test_run.skel.h" + +void test_raw_tp_test_run(void) +{ + int comm_fd = -1, err, nr_online, i, prog_fd; + __u64 args[2] = {0x1234ULL, 0x5678ULL}; + int expected_retval = 0x1234 + 0x5678; + struct test_raw_tp_test_run *skel; + char buf[] = "new_name"; + bool *online = NULL; + LIBBPF_OPTS(bpf_test_run_opts, opts, + .ctx_in = args, + .ctx_size_in = sizeof(args), + .flags = BPF_F_TEST_RUN_ON_CPU, + ); + + err = 
parse_cpu_mask_file("/sys/devices/system/cpu/online", &online, + &nr_online); + if (!ASSERT_OK(err, "parse_cpu_mask_file")) + return; + + skel = test_raw_tp_test_run__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + err = test_raw_tp_test_run__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); + if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm")) + goto cleanup; + + err = write(comm_fd, buf, sizeof(buf)); + ASSERT_GE(err, 0, "task rename"); + + ASSERT_NEQ(skel->bss->count, 0, "check_count"); + ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu"); + + prog_fd = bpf_program__fd(skel->progs.rename); + opts.ctx_in = args; + opts.ctx_size_in = sizeof(__u64); + + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_NEQ(err, 0, "test_run should fail for too small ctx"); + + opts.ctx_size_in = sizeof(args); + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(opts.retval, expected_retval, "check_retval"); + + for (i = 0; i < nr_online; i++) { + if (!online[i]) + continue; + + opts.cpu = i; + opts.retval = 0; + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(err, "test_run_opts"); + ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu"); + ASSERT_EQ(opts.retval, expected_retval, "check_retval"); + } + + /* invalid cpu ID should fail with ENXIO */ + opts.cpu = 0xffffffff; + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO"); + ASSERT_ERR(err, "test_run_opts_fail"); + + /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */ + opts.cpu = 1; + opts.flags = 0; + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL"); + ASSERT_ERR(err, "test_run_opts_fail"); + +cleanup: + close(comm_fd); + test_raw_tp_test_run__destroy(skel); + free(online); +} diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c new file mode 100644 index 000000000..e2f1445b0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <linux/nbd.h> + +void test_raw_tp_writable_reject_nbd_invalid(void) +{ + __u32 duration = 0; + char error[4096]; + int bpf_fd = -1, tp_fd = -1; + + const struct bpf_insn program[] = { + /* r6 is our tp buffer */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* one byte beyond the end of the nbd_request struct */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, + sizeof(struct nbd_request)), + BPF_EXIT_INSN(), + }; + + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .log_level = 2, + .log_buf = error, + .log_size = sizeof(error), + ); + + bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", + program, sizeof(program) / sizeof(struct bpf_insn), + &opts); + if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load", + "failed: %d errno %d\n", bpf_fd, errno)) + return; + + tp_fd = bpf_raw_tracepoint_open("nbd_send_request", bpf_fd); + if (CHECK(tp_fd >= 0, "bpf_raw_tracepoint_writable open", + "erroneously succeeded\n")) + goto out_bpffd; + + close(tp_fd); +out_bpffd: + close(bpf_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c new file mode 100644 index 000000000..f4aa7dab4 
--- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <linux/nbd.h> + +/* NOTE: conflict with other tests. */ +void serial_test_raw_tp_writable_test_run(void) +{ + __u32 duration = 0; + char error[4096]; + + const struct bpf_insn trace_program[] = { + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + LIBBPF_OPTS(bpf_prog_load_opts, trace_opts, + .log_level = 2, + .log_buf = error, + .log_size = sizeof(error), + ); + + int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2", + trace_program, sizeof(trace_program) / sizeof(struct bpf_insn), + &trace_opts); + if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded", + "failed: %d errno %d\n", bpf_fd, errno)) + return; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + LIBBPF_OPTS(bpf_prog_load_opts, skb_opts, + .log_buf = error, + .log_size = sizeof(error), + ); + + int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2", + skb_program, sizeof(skb_program) / sizeof(struct bpf_insn), + &skb_opts); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out_bpffd; + + int tp_fd = bpf_raw_tracepoint_open("bpf_test_finish", bpf_fd); + if (CHECK(tp_fd < 0, "bpf_raw_tracepoint_writable opened", + "failed: %d errno %d\n", tp_fd, errno)) + goto out_filterfd; + + char test_skb[128] = { + 0, + }; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = test_skb, + .data_size_in = sizeof(test_skb), + .repeat = 1, + ); + int err = bpf_prog_test_run_opts(filter_fd, &topts); + CHECK(err != 42, "test_run", + "tracepoint did not modify return value\n"); + CHECK(topts.retval != 0, "test_run_ret", + "socket_filter did not return 0\n"); + + close(tp_fd); + + err = bpf_prog_test_run_opts(filter_fd, &topts); + CHECK(err != 0, "test_run_notrace", + "test_run failed with %d errno %d\n", err, errno); + CHECK(topts.retval != 0, "test_run_ret_notrace", + "socket_filter did not return 0\n"); + +out_filterfd: + close(filter_fd); +out_bpffd: + close(bpf_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c new file mode 100644 index 000000000..19e2f2526 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +struct bss { + unsigned did_run; + unsigned iters; + unsigned sum; +}; + +struct rdonly_map_subtest { + const char *subtest_name; + const char *prog_name; + unsigned exp_iters; + unsigned exp_sum; +}; + +void test_rdonly_maps(void) +{ + const char *file = "test_rdonly_maps.bpf.o"; + struct rdonly_map_subtest subtests[] = { + { "skip loop", "skip_loop", 0, 0 }, + { "part loop", "part_loop", 3, 2 + 3 + 4 }, + { "full loop", "full_loop", 4, 2 + 3 + 4 + 5 }, + }; + int i, err, zero = 0, duration = 0; + struct bpf_link *link = NULL; + struct bpf_program *prog; + struct bpf_map *bss_map; + struct bpf_object *obj; + struct bss bss; + + obj = bpf_object__open_file(file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) + return; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) + goto cleanup; + + bss_map = bpf_object__find_map_by_name(obj, ".bss"); + if 
(CHECK(!bss_map, "find_bss_map", "failed\n")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(subtests); i++) { + const struct rdonly_map_subtest *t = &subtests[i]; + + if (!test__start_subtest(t->subtest_name)) + continue; + + prog = bpf_object__find_program_by_name(obj, t->prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", + t->prog_name)) + goto cleanup; + + memset(&bss, 0, sizeof(bss)); + err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0); + if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err)) + goto cleanup; + + link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + if (!ASSERT_OK_PTR(link, "attach_prog")) + goto cleanup; + + /* trigger probe */ + usleep(1); + + bpf_link__destroy(link); + link = NULL; + + err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss); + if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err)) + goto cleanup; + if (CHECK(bss.did_run == 0, "check_run", + "prog '%s' didn't run?\n", t->prog_name)) + goto cleanup; + if (CHECK(bss.iters != t->exp_iters, "check_iters", + "prog '%s' iters: %d, expected: %d\n", + t->prog_name, bss.iters, t->exp_iters)) + goto cleanup; + if (CHECK(bss.sum != t->exp_sum, "check_sum", + "prog '%s' sum: %d, expected: %d\n", + t->prog_name, bss.sum, t->exp_sum)) + goto cleanup; + } + +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/recursion.c b/tools/testing/selftests/bpf/prog_tests/recursion.c new file mode 100644 index 000000000..f3af2627b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/recursion.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "recursion.skel.h" + +void test_recursion(void) +{ + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + struct recursion *skel; + int key = 0; + int err; + + skel = recursion__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = recursion__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + ASSERT_EQ(skel->bss->pass1, 0, "pass1 == 0"); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); + ASSERT_EQ(skel->bss->pass1, 1, "pass1 == 1"); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key); + ASSERT_EQ(skel->bss->pass1, 2, "pass1 == 2"); + + ASSERT_EQ(skel->bss->pass2, 0, "pass2 == 0"); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); + ASSERT_EQ(skel->bss->pass2, 1, "pass2 == 1"); + bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key); + ASSERT_EQ(skel->bss->pass2, 2, "pass2 == 2"); + + err = bpf_obj_get_info_by_fd(bpf_program__fd(skel->progs.on_delete), + &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "get_prog_info")) + goto out; + ASSERT_EQ(prog_info.recursion_misses, 2, "recursion_misses"); +out: + recursion__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c new file mode 100644 index 000000000..d863205bb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_reference_tracking(void) +{ + const char *file = "test_sk_lookup_kern.bpf.o"; + const char *obj_name = "ref_track"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts, + .object_name = obj_name, + .relaxed_maps = true, + ); + struct bpf_object *obj_iter, *obj = NULL; + struct bpf_program *prog; 
+ __u32 duration = 0; + int err = 0; + + obj_iter = bpf_object__open_file(file, &open_opts); + if (!ASSERT_OK_PTR(obj_iter, "obj_iter_open_file")) + return; + + if (CHECK(strcmp(bpf_object__name(obj_iter), obj_name), "obj_name", + "wrong obj name '%s', expected '%s'\n", + bpf_object__name(obj_iter), obj_name)) + goto cleanup; + + bpf_object__for_each_program(prog, obj_iter) { + struct bpf_program *p; + const char *name; + + name = bpf_program__name(prog); + if (!test__start_subtest(name)) + continue; + + obj = bpf_object__open_file(file, &open_opts); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + goto cleanup; + + /* all programs are not loaded by default, so just set + * autoload to true for the single prog under test + */ + p = bpf_object__find_program_by_name(obj, name); + bpf_program__set_autoload(p, true); + + /* Expect verifier failure if test name has 'err' */ + if (strncmp(name, "err_", sizeof("err_") - 1) == 0) { + libbpf_print_fn_t old_print_fn; + + old_print_fn = libbpf_set_print(NULL); + err = !bpf_object__load(obj); + libbpf_set_print(old_print_fn); + } else { + err = bpf_object__load(obj); + } + ASSERT_OK(err, name); + + bpf_object__close(obj); + obj = NULL; + } + +cleanup: + bpf_object__close(obj); + bpf_object__close(obj_iter); +} diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c new file mode 100644 index 000000000..f81d08d42 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/err.h> +#include <string.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> +#include <linux/btf.h> +#include <linux/kernel.h> +#define CONFIG_DEBUG_INFO_BTF +#include <linux/btf_ids.h> +#include "test_progs.h" + +static int duration; + +struct symbol { + const char *name; + int type; + int id; +}; + +struct symbol test_symbols[] = { + { "unused", BTF_KIND_UNKN, 0 }, + { "S", BTF_KIND_TYPEDEF, -1 }, + { "T", BTF_KIND_TYPEDEF, -1 }, + { "U", BTF_KIND_TYPEDEF, -1 }, + { "S", BTF_KIND_STRUCT, -1 }, + { "U", BTF_KIND_UNION, -1 }, + { "func", BTF_KIND_FUNC, -1 }, +}; + +/* Align the .BTF_ids section to 4 bytes */ +asm ( +".pushsection " BTF_IDS_SECTION " ,\"a\"; \n" +".balign 4, 0; \n" +".popsection; \n"); + +BTF_ID_LIST(test_list_local) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +extern __u32 test_list_global[]; +BTF_ID_LIST_GLOBAL(test_list_global, 1) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +BTF_SET_START(test_set) +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) +BTF_SET_END(test_set) + +static int +__resolve_symbol(struct btf *btf, int type_id) +{ + const struct btf_type *type; + const char *str; + unsigned int i; + + type = btf__type_by_id(btf, type_id); + if (!type) { + PRINT_FAIL("Failed to get type for ID %d\n", type_id); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { + if (test_symbols[i].id >= 0) + continue; + + if (BTF_INFO_KIND(type->info) != test_symbols[i].type) + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id); + return -1; + } + + if (!strcmp(str, test_symbols[i].name)) + test_symbols[i].id = type_id; + } + + return 0; +} + +static int resolve_symbols(void) +{ + 
struct btf *btf; + int type_id; + __u32 nr; + + btf = btf__parse_elf("btf_data.bpf.o", NULL); + if (CHECK(libbpf_get_error(btf), "resolve", + "Failed to load BTF from btf_data.o\n")) + return -1; + + nr = btf__type_cnt(btf); + + for (type_id = 1; type_id < nr; type_id++) { + if (__resolve_symbol(btf, type_id)) + break; + } + + btf__free(btf); + return 0; +} + +void test_resolve_btfids(void) +{ + __u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; + unsigned int i, j; + int ret = 0; + + if (resolve_symbols()) + return; + + /* Check BTF_ID_LIST(test_list_local) and + * BTF_ID_LIST_GLOBAL(test_list_global) IDs + */ + for (j = 0; j < ARRAY_SIZE(test_lists); j++) { + test_list = test_lists[j]; + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", + test_symbols[i].name, + test_list[i], test_symbols[i].id); + if (ret) + return; + } + } + + /* Check BTF_SET_START(test_set) IDs */ + for (i = 0; i < test_set.cnt; i++) { + bool found = false; + + for (j = 0; j < ARRAY_SIZE(test_symbols); j++) { + if (test_symbols[j].id != test_set.ids[i]) + continue; + found = true; + break; + } + + ret = CHECK(!found, "id_check", + "ID %d not found in test_symbols\n", + test_set.ids[i]); + if (ret) + break; + + if (i > 0) { + if (!ASSERT_LE(test_set.ids[i - 1], test_set.ids[i], "sort_check")) + return; + } + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c new file mode 100644 index 000000000..9a80fe8a6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <linux/compiler.h> +#include <asm/barrier.h> +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/epoll.h> +#include <time.h> +#include <sched.h> +#include <signal.h> +#include <pthread.h> +#include <sys/sysinfo.h> +#include <linux/perf_event.h> +#include <linux/ring_buffer.h> +#include "test_ringbuf.lskel.h" + +#define EDONE 7777 + +static int duration = 0; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +static int sample_cnt; + +static void atomic_inc(int *cnt) +{ + __atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST); +} + +static int atomic_xchg(int *cnt, int val) +{ + return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST); +} + +static int process_sample(void *ctx, void *data, size_t len) +{ + struct sample *s = data; + + atomic_inc(&sample_cnt); + + switch (s->seq) { + case 0: + CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n", + 333L, s->value); + return 0; + case 1: + CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n", + 777L, s->value); + return -EDONE; + default: + /* we don't care about the rest */ + return 0; + } +} + +static struct test_ringbuf_lskel *skel; +static struct ring_buffer *ringbuf; + +static void trigger_samples() +{ + skel->bss->dropped = 0; + skel->bss->total = 0; + skel->bss->discarded = 0; + + /* trigger exactly two samples */ + skel->bss->value = 333; + syscall(__NR_getpgid); + skel->bss->value = 777; + syscall(__NR_getpgid); +} + +static void *poll_thread(void *input) +{ + long timeout = (long)input; + + return (void *)(long)ring_buffer__poll(ringbuf, timeout); +} + +void test_ringbuf(void) +{ + const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); + pthread_t thread; + long bg_ret = -1; + int err, cnt, rb_fd; + int page_size = getpagesize(); + void *mmap_ptr, *tmp_ptr; + + skel = 
test_ringbuf_lskel__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + skel->maps.ringbuf.max_entries = page_size; + + err = test_ringbuf_lskel__load(skel); + if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) + goto cleanup; + + rb_fd = skel->maps.ringbuf.map_fd; + /* good read/write cons_pos */ + mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); + ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"); + tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE); + if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend")) + goto cleanup; + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); + + /* bad writeable prod_pos */ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size); + err = -errno; + ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos"); + ASSERT_EQ(err, -EPERM, "wr_prod_pos_err"); + + /* bad writeable data pages */ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + err = -errno; + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one"); + ASSERT_EQ(err, -EPERM, "wr_data_page_one_err"); + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size); + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two"); + mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all"); + + /* good read-only pages */ + mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0); + if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos")) + goto cleanup; + + ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect"); + ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap"); + ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro"); + + /* good read-only pages with initial offset */ + mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size); + if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos")) + goto cleanup; + + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro"); + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd, + process_sample, NULL, NULL); + if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) + goto cleanup; + + err = test_ringbuf_lskel__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) + goto cleanup; + + trigger_samples(); + + /* 2 submitted + 1 discarded records */ + CHECK(skel->bss->avail_data != 3 * rec_sz, + "err_avail_size", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->avail_data); + CHECK(skel->bss->ring_size != page_size, + "err_ring_size", "exp %ld, got %ld\n", + (long)page_size, skel->bss->ring_size); + CHECK(skel->bss->cons_pos != 0, + "err_cons_pos", "exp %ld, got %ld\n", + 0L, skel->bss->cons_pos); + CHECK(skel->bss->prod_pos != 3 * rec_sz, + "err_prod_pos", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->prod_pos); + + /* poll for samples */ + err = ring_buffer__poll(ringbuf, -1); + + /* -EDONE is used as an indicator that we are done */ + if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err)) + goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + 
CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); + + /* we expect extra polling to return nothing */ + err = ring_buffer__poll(ringbuf, 0); + if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err)) + goto cleanup; + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", + 1L, skel->bss->discarded); + + /* now validate consumer position is updated and returned */ + trigger_samples(); + CHECK(skel->bss->cons_pos != 3 * rec_sz, + "err_cons_pos", "exp %ld, got %ld\n", + 3L * rec_sz, skel->bss->cons_pos); + err = ring_buffer__poll(ringbuf, -1); + CHECK(err <= 0, "poll_err", "err %d\n", err); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt); + + /* start poll in background w/ long timeout */ + err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000); + if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err)) + goto cleanup; + + /* turn off notifications now */ + skel->bss->flags = BPF_RB_NO_WAKEUP; + + /* give background thread a bit of a time */ + usleep(50000); + trigger_samples(); + /* sleeping arbitrarily is bad, but no better way to know that + * epoll_wait() **DID NOT** unblock in background thread + */ + usleep(50000); + /* background poll should still be blocked */ + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err != EBUSY, "try_join", "err %d\n", err)) + goto cleanup; + + /* BPF side did everything right */ + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", + 1L, skel->bss->discarded); + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + /* clear flags to return to "adaptive" notification mode */ + skel->bss->flags = 0; + + /* produce new samples, no notification should be triggered, because + * consumer is now behind + */ + trigger_samples(); + + /* background poll should still be blocked */ + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err != EBUSY, "try_join", "err %d\n", err)) + goto cleanup; + + /* still no samples, because consumer is behind */ + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt); + + skel->bss->dropped = 0; + skel->bss->total = 0; + skel->bss->discarded = 0; + + skel->bss->value = 333; + syscall(__NR_getpgid); + /* now force notifications */ + skel->bss->flags = BPF_RB_FORCE_WAKEUP; + skel->bss->value = 777; + syscall(__NR_getpgid); + + /* now we should get a pending notification */ + usleep(50000); + err = pthread_tryjoin_np(thread, (void **)&bg_ret); + if (CHECK(err, "join_bg", "err %d\n", err)) + goto cleanup; + + if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret)) + goto cleanup; + + /* due to timing variations, there could still be non-notified + * samples, so consume them here to collect all the samples + */ + err = ring_buffer__consume(ringbuf); + CHECK(err < 0, "rb_consume", "failed: %d\b", err); + + /* 3 rounds, 2 samples each */ + cnt = atomic_xchg(&sample_cnt, 0); + CHECK(cnt != 6, "cnt", "exp %d samples, got 
%d\n", 6, cnt); + + /* BPF side did everything right */ + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n", + 1L, skel->bss->discarded); + + test_ringbuf_lskel__detach(skel); +cleanup: + ring_buffer__free(ringbuf); + test_ringbuf_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c new file mode 100644 index 000000000..1455911d9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <test_progs.h> +#include <sys/epoll.h> +#include "test_ringbuf_multi.skel.h" + +static int duration = 0; + +struct sample { + int pid; + int seq; + long value; + char comm[16]; +}; + +static int process_sample(void *ctx, void *data, size_t len) +{ + int ring = (unsigned long)ctx; + struct sample *s = data; + + switch (s->seq) { + case 0: + CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring); + CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n", + 333L, s->value); + break; + case 1: + CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring); + CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n", + 777L, s->value); + break; + default: + CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n", + s->seq, s->value); + return -1; + } + + return 0; +} + +void test_ringbuf_multi(void) +{ + struct test_ringbuf_multi *skel; + struct ring_buffer *ringbuf = NULL; + int err; + int page_size = getpagesize(); + int proto_fd = -1; + + skel = test_ringbuf_multi__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + /* validate ringbuf size adjustment logic */ + ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before"); + ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize"); + ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after"); + ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset"); + ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final"); + + proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL); + if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n")) + goto cleanup; + + err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd); + if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n")) + goto cleanup; + + err = test_ringbuf_multi__load(skel); + if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) + goto cleanup; + + close(proto_fd); + proto_fd = -1; + + /* make sure we can't resize ringbuf after object load */ + if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_load")) + goto cleanup; + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1), + process_sample, (void *)(long)1, NULL); + if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n")) + goto cleanup; + + err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2), + process_sample, (void *)(long)2); + if (CHECK(err, "ringbuf_add", "failed to add another ring\n")) + goto cleanup; + + err = 
test_ringbuf_multi__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err)) + goto cleanup; + + /* trigger few samples, some will be skipped */ + skel->bss->target_ring = 0; + skel->bss->value = 333; + syscall(__NR_getpgid); + + /* skipped, no ringbuf in slot 1 */ + skel->bss->target_ring = 1; + skel->bss->value = 555; + syscall(__NR_getpgid); + + skel->bss->target_ring = 2; + skel->bss->value = 777; + syscall(__NR_getpgid); + + /* poll for samples, should get 2 ringbufs back */ + err = ring_buffer__poll(ringbuf, -1); + if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err)) + goto cleanup; + + /* expect extra polling to return nothing */ + err = ring_buffer__poll(ringbuf, 0); + if (CHECK(err < 0, "extra_samples", "poll result: %d\n", err)) + goto cleanup; + + CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n", + 0L, skel->bss->dropped); + CHECK(skel->bss->skipped != 1, "err_skipped", "exp %ld, got %ld\n", + 1L, skel->bss->skipped); + CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n", + 2L, skel->bss->total); + +cleanup: + if (proto_fd >= 0) + close(proto_fd); + ring_buffer__free(ringbuf); + test_ringbuf_multi__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c new file mode 100644 index 000000000..8b571890c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/section_names.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook +#include <test_progs.h> + +static int duration = 0; + +struct sec_name_test { + const char sec_name[32]; + struct { + int rc; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + } expected_load; + struct { + int rc; + enum bpf_attach_type attach_type; + } expected_attach; +}; + +static struct sec_name_test tests[] = { + {"InvAliD", {-ESRCH, 0, 0}, {-EINVAL, 0} }, + {"cgroup", {-ESRCH, 0, 0}, {-EINVAL, 0} }, + {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} }, + {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"uprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"uretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} }, + {"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} }, + {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} }, + {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} }, + {"tp/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} }, + { + "raw_tracepoint/", + {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, + {-EINVAL, 0}, + }, + {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} }, + {"xdp", {0, BPF_PROG_TYPE_XDP, BPF_XDP}, {0, BPF_XDP} }, + {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} }, + {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} }, + {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} }, + {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} }, + {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} }, + { + "cgroup_skb/ingress", + {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS}, + {0, BPF_CGROUP_INET_INGRESS}, + }, + { + "cgroup_skb/egress", + {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS}, + {0, BPF_CGROUP_INET_EGRESS}, + }, + {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} }, + { + "cgroup/sock", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE}, + {0, BPF_CGROUP_INET_SOCK_CREATE}, + }, + { + 
"cgroup/post_bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND}, + {0, BPF_CGROUP_INET4_POST_BIND}, + }, + { + "cgroup/post_bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND}, + {0, BPF_CGROUP_INET6_POST_BIND}, + }, + { + "cgroup/dev", + {0, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE}, + {0, BPF_CGROUP_DEVICE}, + }, + { + "sockops", + {0, BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS}, + {0, BPF_CGROUP_SOCK_OPS}, + }, + { + "sk_skb/stream_parser", + {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_PARSER}, + {0, BPF_SK_SKB_STREAM_PARSER}, + }, + { + "sk_skb/stream_verdict", + {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_VERDICT}, + {0, BPF_SK_SKB_STREAM_VERDICT}, + }, + {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} }, + { + "sk_msg", + {0, BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT}, + {0, BPF_SK_MSG_VERDICT}, + }, + { + "lirc_mode2", + {0, BPF_PROG_TYPE_LIRC_MODE2, BPF_LIRC_MODE2}, + {0, BPF_LIRC_MODE2}, + }, + { + "flow_dissector", + {0, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_FLOW_DISSECTOR}, + {0, BPF_FLOW_DISSECTOR}, + }, + { + "cgroup/bind4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND}, + {0, BPF_CGROUP_INET4_BIND}, + }, + { + "cgroup/bind6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND}, + {0, BPF_CGROUP_INET6_BIND}, + }, + { + "cgroup/connect4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT}, + {0, BPF_CGROUP_INET4_CONNECT}, + }, + { + "cgroup/connect6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT}, + {0, BPF_CGROUP_INET6_CONNECT}, + }, + { + "cgroup/sendmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG}, + {0, BPF_CGROUP_UDP4_SENDMSG}, + }, + { + "cgroup/sendmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG}, + {0, BPF_CGROUP_UDP6_SENDMSG}, + }, + { + "cgroup/recvmsg4", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG}, + {0, BPF_CGROUP_UDP4_RECVMSG}, + }, + { + "cgroup/recvmsg6", + {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG}, + {0, BPF_CGROUP_UDP6_RECVMSG}, + }, + { + "cgroup/sysctl", + {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL}, + {0, BPF_CGROUP_SYSCTL}, + }, + { + "cgroup/getsockopt", + {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT}, + {0, BPF_CGROUP_GETSOCKOPT}, + }, + { + "cgroup/setsockopt", + {0, BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT}, + {0, BPF_CGROUP_SETSOCKOPT}, + }, +}; + +static void test_prog_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type expected_attach_type; + enum bpf_prog_type prog_type; + int rc; + + rc = libbpf_prog_type_by_name(test->sec_name, &prog_type, + &expected_attach_type); + + CHECK(rc != test->expected_load.rc, "check_code", + "prog: unexpected rc=%d for %s\n", rc, test->sec_name); + + if (rc) + return; + + CHECK(prog_type != test->expected_load.prog_type, "check_prog_type", + "prog: unexpected prog_type=%d for %s\n", + prog_type, test->sec_name); + + CHECK(expected_attach_type != test->expected_load.expected_attach_type, + "check_attach_type", "prog: unexpected expected_attach_type=%d for %s\n", + expected_attach_type, test->sec_name); +} + +static void test_attach_type_by_name(const struct sec_name_test *test) +{ + enum bpf_attach_type attach_type; + int rc; + + rc = libbpf_attach_type_by_name(test->sec_name, &attach_type); + + CHECK(rc != test->expected_attach.rc, "check_ret", + "attach: unexpected rc=%d for %s\n", rc, test->sec_name); + + if (rc) + return; + + CHECK(attach_type != test->expected_attach.attach_type, + 
"check_attach_type", "attach: unexpected attach_type=%d for %s\n", + attach_type, test->sec_name); +} + +void test_section_names(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + struct sec_name_test *test = &tests[i]; + + test_prog_type_by_name(test); + test_attach_type_by_name(test); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c new file mode 100644 index 000000000..64c5f5eb2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Facebook */ + +#include <stdlib.h> +#include <unistd.h> +#include <stdbool.h> +#include <string.h> +#include <errno.h> +#include <assert.h> +#include <fcntl.h> +#include <linux/bpf.h> +#include <linux/err.h> +#include <linux/types.h> +#include <linux/if_ether.h> +#include <sys/types.h> +#include <sys/epoll.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include "bpf_util.h" + +#include "test_progs.h" +#include "test_select_reuseport_common.h" + +#define MAX_TEST_NAME 80 +#define MIN_TCPHDR_LEN 20 +#define UDPHDR_LEN 8 + +#define TCP_SYNCOOKIE_SYSCTL "/proc/sys/net/ipv4/tcp_syncookies" +#define TCP_FO_SYSCTL "/proc/sys/net/ipv4/tcp_fastopen" +#define REUSEPORT_ARRAY_SIZE 32 + +static int result_map, tmp_index_ovr_map, linum_map, data_check_map; +static __u32 expected_results[NR_RESULTS]; +static int sk_fds[REUSEPORT_ARRAY_SIZE]; +static int reuseport_array = -1, outer_map = -1; +static enum bpf_map_type inner_map_type; +static int select_by_skb_data_prog; +static int saved_tcp_syncookie = -1; +static struct bpf_object *obj; +static int saved_tcp_fo = -1; +static __u32 index_zero; +static int epfd; + +static union sa46 { + struct sockaddr_in6 v6; + struct sockaddr_in v4; + sa_family_t family; +} srv_sa; + +#define RET_IF(condition, tag, format...) ({ \ + if (CHECK_FAIL(condition)) { \ + printf(tag " " format); \ + return; \ + } \ +}) + +#define RET_ERR(condition, tag, format...) 
({ \ + if (CHECK_FAIL(condition)) { \ + printf(tag " " format); \ + return -1; \ + } \ +}) + +static int create_maps(enum bpf_map_type inner_type) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + + inner_map_type = inner_type; + + /* Creating reuseport_array */ + reuseport_array = bpf_map_create(inner_type, "reuseport_array", + sizeof(__u32), sizeof(__u32), REUSEPORT_ARRAY_SIZE, NULL); + RET_ERR(reuseport_array < 0, "creating reuseport_array", + "reuseport_array:%d errno:%d\n", reuseport_array, errno); + + /* Creating outer_map */ + opts.inner_map_fd = reuseport_array; + outer_map = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_map", + sizeof(__u32), sizeof(__u32), 1, &opts); + RET_ERR(outer_map < 0, "creating outer_map", + "outer_map:%d errno:%d\n", outer_map, errno); + + return 0; +} + +static int prepare_bpf_obj(void) +{ + struct bpf_program *prog; + struct bpf_map *map; + int err; + + obj = bpf_object__open("test_select_reuseport_kern.bpf.o"); + err = libbpf_get_error(obj); + RET_ERR(err, "open test_select_reuseport_kern.bpf.o", + "obj:%p PTR_ERR(obj):%d\n", obj, err); + + map = bpf_object__find_map_by_name(obj, "outer_map"); + RET_ERR(!map, "find outer_map", "!map\n"); + err = bpf_map__reuse_fd(map, outer_map); + RET_ERR(err, "reuse outer_map", "err:%d\n", err); + + err = bpf_object__load(obj); + RET_ERR(err, "load bpf_object", "err:%d\n", err); + + prog = bpf_object__next_program(obj, NULL); + RET_ERR(!prog, "get first bpf_program", "!prog\n"); + select_by_skb_data_prog = bpf_program__fd(prog); + RET_ERR(select_by_skb_data_prog < 0, "get prog fd", + "select_by_skb_data_prog:%d\n", select_by_skb_data_prog); + + map = bpf_object__find_map_by_name(obj, "result_map"); + RET_ERR(!map, "find result_map", "!map\n"); + result_map = bpf_map__fd(map); + RET_ERR(result_map < 0, "get result_map fd", + "result_map:%d\n", result_map); + + map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map"); + RET_ERR(!map, "find tmp_index_ovr_map\n", "!map"); + tmp_index_ovr_map = bpf_map__fd(map); + RET_ERR(tmp_index_ovr_map < 0, "get tmp_index_ovr_map fd", + "tmp_index_ovr_map:%d\n", tmp_index_ovr_map); + + map = bpf_object__find_map_by_name(obj, "linum_map"); + RET_ERR(!map, "find linum_map", "!map\n"); + linum_map = bpf_map__fd(map); + RET_ERR(linum_map < 0, "get linum_map fd", + "linum_map:%d\n", linum_map); + + map = bpf_object__find_map_by_name(obj, "data_check_map"); + RET_ERR(!map, "find data_check_map", "!map\n"); + data_check_map = bpf_map__fd(map); + RET_ERR(data_check_map < 0, "get data_check_map fd", + "data_check_map:%d\n", data_check_map); + + return 0; +} + +static void sa46_init_loopback(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_loopback; + else + sa->v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); +} + +static void sa46_init_inany(union sa46 *sa, sa_family_t family) +{ + memset(sa, 0, sizeof(*sa)); + sa->family = family; + if (sa->family == AF_INET6) + sa->v6.sin6_addr = in6addr_any; + else + sa->v4.sin_addr.s_addr = INADDR_ANY; +} + +static int read_int_sysctl(const char *sysctl) +{ + char buf[16]; + int fd, ret; + + fd = open(sysctl, 0); + RET_ERR(fd == -1, "open(sysctl)", + "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno); + + ret = read(fd, buf, sizeof(buf)); + RET_ERR(ret <= 0, "read(sysctl)", + "sysctl:%s ret:%d errno:%d\n", sysctl, ret, errno); + + close(fd); + return atoi(buf); +} + +static int write_int_sysctl(const char *sysctl, int v) +{ + int fd, ret, size; + char 
buf[16]; + + fd = open(sysctl, O_RDWR); + RET_ERR(fd == -1, "open(sysctl)", + "sysctl:%s fd:%d errno:%d\n", sysctl, fd, errno); + + size = snprintf(buf, sizeof(buf), "%d", v); + ret = write(fd, buf, size); + RET_ERR(ret != size, "write(sysctl)", + "sysctl:%s ret:%d size:%d errno:%d\n", + sysctl, ret, size, errno); + + close(fd); + return 0; +} + +static void restore_sysctls(void) +{ + if (saved_tcp_fo != -1) + write_int_sysctl(TCP_FO_SYSCTL, saved_tcp_fo); + if (saved_tcp_syncookie != -1) + write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, saved_tcp_syncookie); +} + +static int enable_fastopen(void) +{ + int fo; + + fo = read_int_sysctl(TCP_FO_SYSCTL); + if (fo < 0) + return -1; + + return write_int_sysctl(TCP_FO_SYSCTL, fo | 7); +} + +static int enable_syncookie(void) +{ + return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 2); +} + +static int disable_syncookie(void) +{ + return write_int_sysctl(TCP_SYNCOOKIE_SYSCTL, 0); +} + +static long get_linum(void) +{ + __u32 linum; + int err; + + err = bpf_map_lookup_elem(linum_map, &index_zero, &linum); + RET_ERR(err < 0, "lookup_elem(linum_map)", "err:%d errno:%d\n", + err, errno); + + return linum; +} + +static void check_data(int type, sa_family_t family, const struct cmd *cmd, + int cli_fd) +{ + struct data_check expected = {}, result; + union sa46 cli_sa; + socklen_t addrlen; + int err; + + addrlen = sizeof(cli_sa); + err = getsockname(cli_fd, (struct sockaddr *)&cli_sa, + &addrlen); + RET_IF(err < 0, "getsockname(cli_fd)", "err:%d errno:%d\n", + err, errno); + + err = bpf_map_lookup_elem(data_check_map, &index_zero, &result); + RET_IF(err < 0, "lookup_elem(data_check_map)", "err:%d errno:%d\n", + err, errno); + + if (type == SOCK_STREAM) { + expected.len = MIN_TCPHDR_LEN; + expected.ip_protocol = IPPROTO_TCP; + } else { + expected.len = UDPHDR_LEN; + expected.ip_protocol = IPPROTO_UDP; + } + + if (family == AF_INET6) { + expected.eth_protocol = htons(ETH_P_IPV6); + expected.bind_inany = !srv_sa.v6.sin6_addr.s6_addr32[3] && + !srv_sa.v6.sin6_addr.s6_addr32[2] && + !srv_sa.v6.sin6_addr.s6_addr32[1] && + !srv_sa.v6.sin6_addr.s6_addr32[0]; + + memcpy(&expected.skb_addrs[0], cli_sa.v6.sin6_addr.s6_addr32, + sizeof(cli_sa.v6.sin6_addr)); + memcpy(&expected.skb_addrs[4], &in6addr_loopback, + sizeof(in6addr_loopback)); + expected.skb_ports[0] = cli_sa.v6.sin6_port; + expected.skb_ports[1] = srv_sa.v6.sin6_port; + } else { + expected.eth_protocol = htons(ETH_P_IP); + expected.bind_inany = !srv_sa.v4.sin_addr.s_addr; + + expected.skb_addrs[0] = cli_sa.v4.sin_addr.s_addr; + expected.skb_addrs[1] = htonl(INADDR_LOOPBACK); + expected.skb_ports[0] = cli_sa.v4.sin_port; + expected.skb_ports[1] = srv_sa.v4.sin_port; + } + + if (memcmp(&result, &expected, offsetof(struct data_check, + equal_check_end))) { + printf("unexpected data_check\n"); + printf(" result: (0x%x, %u, %u)\n", + result.eth_protocol, result.ip_protocol, + result.bind_inany); + printf("expected: (0x%x, %u, %u)\n", + expected.eth_protocol, expected.ip_protocol, + expected.bind_inany); + RET_IF(1, "data_check result != expected", + "bpf_prog_linum:%ld\n", get_linum()); + } + + RET_IF(!result.hash, "data_check result.hash empty", + "result.hash:%u", result.hash); + + expected.len += cmd ? 
sizeof(*cmd) : 0; + if (type == SOCK_STREAM) + RET_IF(expected.len > result.len, "expected.len > result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%ld\n", + expected.len, result.len, get_linum()); + else + RET_IF(expected.len != result.len, "expected.len != result.len", + "expected.len:%u result.len:%u bpf_prog_linum:%ld\n", + expected.len, result.len, get_linum()); +} + +static const char *result_to_str(enum result res) +{ + switch (res) { + case DROP_ERR_INNER_MAP: + return "DROP_ERR_INNER_MAP"; + case DROP_ERR_SKB_DATA: + return "DROP_ERR_SKB_DATA"; + case DROP_ERR_SK_SELECT_REUSEPORT: + return "DROP_ERR_SK_SELECT_REUSEPORT"; + case DROP_MISC: + return "DROP_MISC"; + case PASS: + return "PASS"; + case PASS_ERR_SK_SELECT_REUSEPORT: + return "PASS_ERR_SK_SELECT_REUSEPORT"; + default: + return "UNKNOWN"; + } +} + +static void check_results(void) +{ + __u32 results[NR_RESULTS]; + __u32 i, broken = 0; + int err; + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &results[i]); + RET_IF(err < 0, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + for (i = 0; i < NR_RESULTS; i++) { + if (results[i] != expected_results[i]) { + broken = i; + break; + } + } + + if (i == NR_RESULTS) + return; + + printf("unexpected result\n"); + printf(" result: ["); + printf("%u", results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", results[i]); + printf("]\n"); + + printf("expected: ["); + printf("%u", expected_results[0]); + for (i = 1; i < NR_RESULTS; i++) + printf(", %u", expected_results[i]); + printf("]\n"); + + printf("mismatch on %s (bpf_prog_linum:%ld)\n", result_to_str(broken), + get_linum()); + + CHECK_FAIL(true); +} + +static int send_data(int type, sa_family_t family, void *data, size_t len, + enum result expected) +{ + union sa46 cli_sa; + int fd, err; + + fd = socket(family, type, 0); + RET_ERR(fd == -1, "socket()", "fd:%d errno:%d\n", fd, errno); + + sa46_init_loopback(&cli_sa, family); + err = bind(fd, (struct sockaddr *)&cli_sa, sizeof(cli_sa)); + RET_ERR(err == -1, "bind(cli_sa)", "err:%d errno:%d\n", err, errno); + + err = sendto(fd, data, len, MSG_FASTOPEN, (struct sockaddr *)&srv_sa, + sizeof(srv_sa)); + RET_ERR(err != len && expected >= PASS, + "sendto()", "family:%u err:%d errno:%d expected:%d\n", + family, err, errno, expected); + + return fd; +} + +static void do_test(int type, sa_family_t family, struct cmd *cmd, + enum result expected) +{ + int nev, srv_fd, cli_fd; + struct epoll_event ev; + struct cmd rcv_cmd; + ssize_t nread; + + cli_fd = send_data(type, family, cmd, cmd ? sizeof(*cmd) : 0, + expected); + if (cli_fd < 0) + return; + nev = epoll_wait(epfd, &ev, 1, expected >= PASS ? 5 : 0); + RET_IF((nev <= 0 && expected >= PASS) || + (nev > 0 && expected < PASS), + "nev <> expected", + "nev:%d expected:%d type:%d family:%d data:(%d, %d)\n", + nev, expected, type, family, + cmd ? cmd->reuseport_index : -1, + cmd ? 
cmd->pass_on_failure : -1); + check_results(); + check_data(type, family, cmd, cli_fd); + + if (expected < PASS) + return; + + RET_IF(expected != PASS_ERR_SK_SELECT_REUSEPORT && + cmd->reuseport_index != ev.data.u32, + "check cmd->reuseport_index", + "cmd:(%u, %u) ev.data.u32:%u\n", + cmd->pass_on_failure, cmd->reuseport_index, ev.data.u32); + + srv_fd = sk_fds[ev.data.u32]; + if (type == SOCK_STREAM) { + int new_fd = accept(srv_fd, NULL, 0); + + RET_IF(new_fd == -1, "accept(srv_fd)", + "ev.data.u32:%u new_fd:%d errno:%d\n", + ev.data.u32, new_fd, errno); + + nread = recv(new_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + RET_IF(nread != sizeof(rcv_cmd), + "recv(new_fd)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + + close(new_fd); + } else { + nread = recv(srv_fd, &rcv_cmd, sizeof(rcv_cmd), MSG_DONTWAIT); + RET_IF(nread != sizeof(rcv_cmd), + "recv(sk_fds)", + "ev.data.u32:%u nread:%zd sizeof(rcv_cmd):%zu errno:%d\n", + ev.data.u32, nread, sizeof(rcv_cmd), errno); + } + + close(cli_fd); +} + +static void test_err_inner_map(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + expected_results[DROP_ERR_INNER_MAP]++; + do_test(type, family, &cmd, DROP_ERR_INNER_MAP); +} + +static void test_err_skb_data(int type, sa_family_t family) +{ + expected_results[DROP_ERR_SKB_DATA]++; + do_test(type, family, NULL, DROP_ERR_SKB_DATA); +} + +static void test_err_sk_select_port(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 0, + }; + + expected_results[DROP_ERR_SK_SELECT_REUSEPORT]++; + do_test(type, family, &cmd, DROP_ERR_SK_SELECT_REUSEPORT); +} + +static void test_pass(int type, sa_family_t family) +{ + struct cmd cmd; + int i; + + cmd.pass_on_failure = 0; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + expected_results[PASS]++; + cmd.reuseport_index = i; + do_test(type, family, &cmd, PASS); + } +} + +static void test_syncookie(int type, sa_family_t family) +{ + int err, tmp_index = 1; + struct cmd cmd = { + .reuseport_index = 0, + .pass_on_failure = 0, + }; + + /* + * +1 for TCP-SYN and + * +1 for the TCP-ACK (ack the syncookie) + */ + expected_results[PASS] += 2; + enable_syncookie(); + /* + * Simulate TCP-SYN and TCP-ACK are handled by two different sk: + * TCP-SYN: select sk_fds[tmp_index = 1] tmp_index is from the + * tmp_index_ovr_map + * TCP-ACK: select sk_fds[reuseport_index = 0] reuseport_index + * is from the cmd.reuseport_index + */ + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, + &tmp_index, BPF_ANY); + RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, 1)", + "err:%d errno:%d\n", err, errno); + do_test(type, family, &cmd, PASS); + err = bpf_map_lookup_elem(tmp_index_ovr_map, &index_zero, + &tmp_index); + RET_IF(err < 0 || tmp_index >= 0, + "lookup_elem(tmp_index_ovr_map)", + "err:%d errno:%d tmp_index:%d\n", + err, errno, tmp_index); + disable_syncookie(); +} + +static void test_pass_on_err(int type, sa_family_t family) +{ + struct cmd cmd = { + .reuseport_index = REUSEPORT_ARRAY_SIZE, + .pass_on_failure = 1, + }; + + expected_results[PASS_ERR_SK_SELECT_REUSEPORT] += 1; + do_test(type, family, &cmd, PASS_ERR_SK_SELECT_REUSEPORT); +} + +static void test_detach_bpf(int type, sa_family_t family) +{ +#ifdef SO_DETACH_REUSEPORT_BPF + __u32 nr_run_before = 0, nr_run_after = 0, tmp, i; + struct epoll_event ev; + int cli_fd, err, nev; + struct cmd cmd = {}; + int optvalue = 0; + + err = 
setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, + &optvalue, sizeof(optvalue)); + RET_IF(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); + + err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, + &optvalue, sizeof(optvalue)); + RET_IF(err == 0 || errno != ENOENT, + "setsockopt(SO_DETACH_REUSEPORT_BPF)", + "err:%d errno:%d\n", err, errno); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &tmp); + RET_IF(err < 0, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + nr_run_before += tmp; + } + + cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS); + if (cli_fd < 0) + return; + nev = epoll_wait(epfd, &ev, 1, 5); + RET_IF(nev <= 0, "nev <= 0", + "nev:%d expected:1 type:%d family:%d data:(0, 0)\n", + nev, type, family); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_lookup_elem(result_map, &i, &tmp); + RET_IF(err < 0, "lookup_elem(result_map)", + "i:%u err:%d errno:%d\n", i, err, errno); + nr_run_after += tmp; + } + + RET_IF(nr_run_before != nr_run_after, + "nr_run_before != nr_run_after", + "nr_run_before:%u nr_run_after:%u\n", + nr_run_before, nr_run_after); + + close(cli_fd); +#else + test__skip(); +#endif +} + +static void prepare_sk_fds(int type, sa_family_t family, bool inany) +{ + const int first = REUSEPORT_ARRAY_SIZE - 1; + int i, err, optval = 1; + struct epoll_event ev; + socklen_t addrlen; + + if (inany) + sa46_init_inany(&srv_sa, family); + else + sa46_init_loopback(&srv_sa, family); + addrlen = sizeof(srv_sa); + + /* + * The sk_fds[] is filled from the back such that the order + * is exactly opposite to the (struct sock_reuseport *)reuse->socks[]. + */ + for (i = first; i >= 0; i--) { + sk_fds[i] = socket(family, type, 0); + RET_IF(sk_fds[i] == -1, "socket()", "sk_fds[%d]:%d errno:%d\n", + i, sk_fds[i], errno); + err = setsockopt(sk_fds[i], SOL_SOCKET, SO_REUSEPORT, + &optval, sizeof(optval)); + RET_IF(err == -1, "setsockopt(SO_REUSEPORT)", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (i == first) { + err = setsockopt(sk_fds[i], SOL_SOCKET, + SO_ATTACH_REUSEPORT_EBPF, + &select_by_skb_data_prog, + sizeof(select_by_skb_data_prog)); + RET_IF(err < 0, "setsockopt(SO_ATTACH_REUEPORT_EBPF)", + "err:%d errno:%d\n", err, errno); + } + + err = bind(sk_fds[i], (struct sockaddr *)&srv_sa, addrlen); + RET_IF(err < 0, "bind()", "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + + if (type == SOCK_STREAM) { + err = listen(sk_fds[i], 10); + RET_IF(err < 0, "listen()", + "sk_fds[%d] err:%d errno:%d\n", + i, err, errno); + } + + err = bpf_map_update_elem(reuseport_array, &i, &sk_fds[i], + BPF_NOEXIST); + RET_IF(err < 0, "update_elem(reuseport_array)", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + + if (i == first) { + socklen_t addrlen = sizeof(srv_sa); + + err = getsockname(sk_fds[i], (struct sockaddr *)&srv_sa, + &addrlen); + RET_IF(err == -1, "getsockname()", + "sk_fds[%d] err:%d errno:%d\n", i, err, errno); + } + } + + epfd = epoll_create(1); + RET_IF(epfd == -1, "epoll_create(1)", + "epfd:%d errno:%d\n", epfd, errno); + + ev.events = EPOLLIN; + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) { + ev.data.u32 = i; + err = epoll_ctl(epfd, EPOLL_CTL_ADD, sk_fds[i], &ev); + RET_IF(err, "epoll_ctl(EPOLL_CTL_ADD)", "sk_fds[%d]\n", i); + } +} + +static void setup_per_test(int type, sa_family_t family, bool inany, + bool no_inner_map) +{ + int ovr = -1, err; + + prepare_sk_fds(type, family, inany); + err = bpf_map_update_elem(tmp_index_ovr_map, &index_zero, 
&ovr, + BPF_ANY); + RET_IF(err < 0, "update_elem(tmp_index_ovr_map, 0, -1)", + "err:%d errno:%d\n", err, errno); + + /* Install reuseport_array to outer_map? */ + if (no_inner_map) + return; + + err = bpf_map_update_elem(outer_map, &index_zero, &reuseport_array, + BPF_ANY); + RET_IF(err < 0, "update_elem(outer_map, 0, reuseport_array)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup_per_test(bool no_inner_map) +{ + int i, err, zero = 0; + + memset(expected_results, 0, sizeof(expected_results)); + + for (i = 0; i < NR_RESULTS; i++) { + err = bpf_map_update_elem(result_map, &i, &zero, BPF_ANY); + RET_IF(err, "reset elem in result_map", + "i:%u err:%d errno:%d\n", i, err, errno); + } + + err = bpf_map_update_elem(linum_map, &zero, &zero, BPF_ANY); + RET_IF(err, "reset line number in linum_map", "err:%d errno:%d\n", + err, errno); + + for (i = 0; i < REUSEPORT_ARRAY_SIZE; i++) + close(sk_fds[i]); + close(epfd); + + /* Delete reuseport_array from outer_map? */ + if (no_inner_map) + return; + + err = bpf_map_delete_elem(outer_map, &index_zero); + RET_IF(err < 0, "delete_elem(outer_map)", + "err:%d errno:%d\n", err, errno); +} + +static void cleanup(void) +{ + if (outer_map >= 0) { + close(outer_map); + outer_map = -1; + } + + if (reuseport_array >= 0) { + close(reuseport_array); + reuseport_array = -1; + } + + if (obj) { + bpf_object__close(obj); + obj = NULL; + } + + memset(expected_results, 0, sizeof(expected_results)); +} + +static const char *maptype_str(enum bpf_map_type type) +{ + switch (type) { + case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + return "reuseport_sockarray"; + case BPF_MAP_TYPE_SOCKMAP: + return "sockmap"; + case BPF_MAP_TYPE_SOCKHASH: + return "sockhash"; + default: + return "unknown"; + } +} + +static const char *family_str(sa_family_t family) +{ + switch (family) { + case AF_INET: + return "IPv4"; + case AF_INET6: + return "IPv6"; + default: + return "unknown"; + } +} + +static const char *sotype_str(int sotype) +{ + switch (sotype) { + case SOCK_STREAM: + return "TCP"; + case SOCK_DGRAM: + return "UDP"; + default: + return "unknown"; + } +} + +#define TEST_INIT(fn_, ...) { .fn = fn_, .name = #fn_, __VA_ARGS__ } + +static void test_config(int sotype, sa_family_t family, bool inany) +{ + const struct test { + void (*fn)(int sotype, sa_family_t family); + const char *name; + bool no_inner_map; + int need_sotype; + } tests[] = { + TEST_INIT(test_err_inner_map, + .no_inner_map = true), + TEST_INIT(test_err_skb_data), + TEST_INIT(test_err_sk_select_port), + TEST_INIT(test_pass), + TEST_INIT(test_syncookie, + .need_sotype = SOCK_STREAM), + TEST_INIT(test_pass_on_err), + TEST_INIT(test_detach_bpf), + }; + char s[MAX_TEST_NAME]; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (t->need_sotype && t->need_sotype != sotype) + continue; /* test not compatible with socket type */ + + snprintf(s, sizeof(s), "%s %s/%s %s %s", + maptype_str(inner_map_type), + family_str(family), sotype_str(sotype), + inany ? 
"INANY" : "LOOPBACK", t->name); + + if (!test__start_subtest(s)) + continue; + + setup_per_test(sotype, family, inany, t->no_inner_map); + t->fn(sotype, family); + cleanup_per_test(t->no_inner_map); + } +} + +#define BIND_INANY true + +static void test_all(void) +{ + const struct config { + int sotype; + sa_family_t family; + bool inany; + } configs[] = { + { SOCK_STREAM, AF_INET }, + { SOCK_STREAM, AF_INET, BIND_INANY }, + { SOCK_STREAM, AF_INET6 }, + { SOCK_STREAM, AF_INET6, BIND_INANY }, + { SOCK_DGRAM, AF_INET }, + { SOCK_DGRAM, AF_INET6 }, + }; + const struct config *c; + + for (c = configs; c < configs + ARRAY_SIZE(configs); c++) + test_config(c->sotype, c->family, c->inany); +} + +void test_map_type(enum bpf_map_type mt) +{ + if (create_maps(mt)) + goto out; + if (prepare_bpf_obj()) + goto out; + + test_all(); +out: + cleanup(); +} + +void serial_test_select_reuseport(void) +{ + saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL); + if (saved_tcp_fo < 0) + goto out; + saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL); + if (saved_tcp_syncookie < 0) + goto out; + + if (enable_fastopen()) + goto out; + if (disable_syncookie()) + goto out; + + test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + test_map_type(BPF_MAP_TYPE_SOCKMAP); + test_map_type(BPF_MAP_TYPE_SOCKHASH); +out: + restore_sysctls(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c new file mode 100644 index 000000000..d63a20fbe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <sys/time.h> +#include <sys/resource.h> +#include "test_send_signal_kern.skel.h" + +static int sigusr1_received; + +static void sigusr1_handler(int signum) +{ + sigusr1_received = 1; +} + +static void test_send_signal_common(struct perf_event_attr *attr, + bool signal_thread) +{ + struct test_send_signal_kern *skel; + int pipe_c2p[2], pipe_p2c[2]; + int err = -1, pmu_fd = -1; + char buf[256]; + pid_t pid; + + if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p")) + return; + + if (!ASSERT_OK(pipe(pipe_p2c), "pipe_p2c")) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + return; + } + + pid = fork(); + if (!ASSERT_GE(pid, 0, "fork")) { + close(pipe_c2p[0]); + close(pipe_c2p[1]); + close(pipe_p2c[0]); + close(pipe_p2c[1]); + return; + } + + if (pid == 0) { + int old_prio; + volatile int j = 0; + + /* install signal handler and notify parent */ + ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal"); + + close(pipe_c2p[0]); /* close read */ + close(pipe_p2c[1]); /* close write */ + + /* boost with a high priority so we got a higher chance + * that if an interrupt happens, the underlying task + * is this process. + */ + errno = 0; + old_prio = getpriority(PRIO_PROCESS, 0); + ASSERT_OK(errno, "getpriority"); + ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority"); + + /* notify parent signal handler is installed */ + ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write"); + + /* make sure parent enabled bpf program to send_signal */ + ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); + + /* wait a little for signal handler */ + for (int i = 0; i < 1000000000 && !sigusr1_received; i++) + j /= i + j + 1; + + buf[0] = sigusr1_received ? 
'2' : '0'; + ASSERT_EQ(sigusr1_received, 1, "sigusr1_received"); + ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write"); + + /* wait for parent notification and exit */ + ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); + + /* restore the old priority */ + ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority"); + + close(pipe_c2p[1]); + close(pipe_p2c[0]); + exit(0); + } + + close(pipe_c2p[1]); /* close write */ + close(pipe_p2c[0]); /* close read */ + + skel = test_send_signal_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto skel_open_load_failure; + + if (!attr) { + err = test_send_signal_kern__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) { + err = -1; + goto destroy_skel; + } + } else { + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */, + -1 /* group id */, 0 /* flags */); + if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) { + err = -1; + goto destroy_skel; + } + + skel->links.send_signal_perf = + bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd); + if (!ASSERT_OK_PTR(skel->links.send_signal_perf, "attach_perf_event")) + goto disable_pmu; + } + + /* wait until child signal handler installed */ + ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read"); + + /* trigger the bpf send_signal */ + skel->bss->signal_thread = signal_thread; + skel->bss->sig = SIGUSR1; + skel->bss->pid = pid; + + /* notify child that bpf program can send_signal now */ + ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); + + /* wait for result */ + err = read(pipe_c2p[0], buf, 1); + if (!ASSERT_GE(err, 0, "reading pipe")) + goto disable_pmu; + if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) { + err = -1; + goto disable_pmu; + } + + ASSERT_EQ(buf[0], '2', "incorrect result"); + + /* notify child safe to exit */ + ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); + +disable_pmu: + close(pmu_fd); +destroy_skel: + test_send_signal_kern__destroy(skel); +skel_open_load_failure: + close(pipe_c2p[0]); + close(pipe_p2c[1]); + wait(NULL); +} + +static void test_send_signal_tracepoint(bool signal_thread) +{ + test_send_signal_common(NULL, signal_thread); +} + +static void test_send_signal_perf(bool signal_thread) +{ + struct perf_event_attr attr = { + .sample_period = 1, + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_CPU_CLOCK, + }; + + test_send_signal_common(&attr, signal_thread); +} + +static void test_send_signal_nmi(bool signal_thread) +{ + struct perf_event_attr attr = { + .sample_period = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + int pmu_fd; + + /* Some setups (e.g. virtual machines) might run with hardware + * perf events disabled. If this is the case, skip this test. 
+ */ + pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, + -1 /* cpu */, -1 /* group_fd */, 0 /* flags */); + if (pmu_fd == -1) { + if (errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", + __func__); + test__skip(); + return; + } + /* Let the test fail with a more informative message */ + } else { + close(pmu_fd); + } + + test_send_signal_common(&attr, signal_thread); +} + +void test_send_signal(void) +{ + if (test__start_subtest("send_signal_tracepoint")) + test_send_signal_tracepoint(false); + if (test__start_subtest("send_signal_perf")) + test_send_signal_perf(false); + if (test__start_subtest("send_signal_nmi")) + test_send_signal_nmi(false); + if (test__start_subtest("send_signal_tracepoint_thread")) + test_send_signal_tracepoint(true); + if (test__start_subtest("send_signal_perf_thread")) + test_send_signal_perf(true); + if (test__start_subtest("send_signal_nmi_thread")) + test_send_signal_nmi(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c new file mode 100644 index 000000000..15dacfcfa --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <pthread.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include "test_send_signal_kern.skel.h" + +static void sigusr1_handler(int signum) +{ +} + +#define THREAD_COUNT 100 + +static void *worker(void *p) +{ + int i; + + for (i = 0; i < 1000; i++) + usleep(1); + + return NULL; +} + +/* NOTE: this test can cause event loss */ +void serial_test_send_signal_sched_switch(void) +{ + struct test_send_signal_kern *skel; + pthread_t threads[THREAD_COUNT]; + u32 duration = 0; + int i, err; + + signal(SIGUSR1, sigusr1_handler); + + skel = test_send_signal_kern__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n")) + return; + + skel->bss->pid = getpid(); + skel->bss->sig = SIGUSR1; + + err = test_send_signal_kern__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed\n")) + goto destroy_skel; + + for (i = 0; i < THREAD_COUNT; i++) { + err = pthread_create(threads + i, NULL, worker, NULL); + if (CHECK(err, "pthread_create", "Error creating thread, %s\n", + strerror(errno))) + goto destroy_skel; + } + + for (i = 0; i < THREAD_COUNT; i++) + pthread_join(threads[i], NULL); + +destroy_skel: + test_send_signal_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c new file mode 100644 index 000000000..018611e6b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/setget_sockopt.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#define _GNU_SOURCE +#include <sched.h> +#include <linux/socket.h> +#include <net/if.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" + +#include "setget_sockopt.skel.h" + +#define CG_NAME "/setget-sockopt-test" + +static const char addr4_str[] = "127.0.0.1"; +static const char addr6_str[] = "::1"; +static struct setget_sockopt *skel; +static int cg_fd; + +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "set lo up")) + return -1; + + if (!ASSERT_OK(system("ip link add dev binddevtest1 type veth peer name binddevtest2"), + "add veth")) + return -1; + + if (!ASSERT_OK(system("ip link set dev binddevtest1 up"), + "bring veth up")) + return -1; + + return 0; +} + +static void test_tcp(int family) +{ + struct setget_sockopt__bss *bss = skel->bss; + int sfd, cfd; + + memset(bss, 0, sizeof(*bss)); + + sfd = start_server(family, SOCK_STREAM, + family == AF_INET6 ? addr6_str : addr4_str, 0, 0); + if (!ASSERT_GE(sfd, 0, "start_server")) + return; + + cfd = connect_to_fd(sfd, 0); + if (!ASSERT_GE(cfd, 0, "connect_to_fd_server")) { + close(sfd); + return; + } + close(sfd); + close(cfd); + + ASSERT_EQ(bss->nr_listen, 1, "nr_listen"); + ASSERT_EQ(bss->nr_connect, 1, "nr_connect"); + ASSERT_EQ(bss->nr_active, 1, "nr_active"); + ASSERT_EQ(bss->nr_passive, 1, "nr_passive"); + ASSERT_EQ(bss->nr_socket_post_create, 2, "nr_socket_post_create"); + ASSERT_EQ(bss->nr_binddev, 2, "nr_bind"); +} + +static void test_udp(int family) +{ + struct setget_sockopt__bss *bss = skel->bss; + int sfd; + + memset(bss, 0, sizeof(*bss)); + + sfd = start_server(family, SOCK_DGRAM, + family == AF_INET6 ? addr6_str : addr4_str, 0, 0); + if (!ASSERT_GE(sfd, 0, "start_server")) + return; + close(sfd); + + ASSERT_GE(bss->nr_socket_post_create, 1, "nr_socket_post_create"); + ASSERT_EQ(bss->nr_binddev, 1, "nr_bind"); +} + +void test_setget_sockopt(void) +{ + cg_fd = test__join_cgroup(CG_NAME); + if (cg_fd < 0) + return; + + if (create_netns()) + goto done; + + skel = setget_sockopt__open(); + if (!ASSERT_OK_PTR(skel, "open skel")) + goto done; + + strcpy(skel->rodata->veth, "binddevtest1"); + skel->rodata->veth_ifindex = if_nametoindex("binddevtest1"); + if (!ASSERT_GT(skel->rodata->veth_ifindex, 0, "if_nametoindex")) + goto done; + + if (!ASSERT_OK(setget_sockopt__load(skel), "load skel")) + goto done; + + skel->links.skops_sockopt = + bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd); + if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup")) + goto done; + + skel->links.socket_post_create = + bpf_program__attach_cgroup(skel->progs.socket_post_create, cg_fd); + if (!ASSERT_OK_PTR(skel->links.socket_post_create, "attach_cgroup")) + goto done; + + test_tcp(AF_INET6); + test_tcp(AF_INET); + test_udp(AF_INET6); + test_udp(AF_INET); + +done: + setget_sockopt__destroy(skel); + close(cg_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c new file mode 100644 index 000000000..70b49da5c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void sigalrm_handler(int s) {} +static struct sigaction sigalrm_action = { + .sa_handler = sigalrm_handler, +}; + +static void test_signal_pending_by_type(enum bpf_prog_type prog_type) +{ + struct bpf_insn prog[4096]; + 
struct itimerval timeo = { + .it_value.tv_usec = 100000, /* 100ms */ + }; + int prog_fd; + int err; + int i; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 0xffffffff, + ); + + for (i = 0; i < ARRAY_SIZE(prog); i++) + prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0); + prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN(); + + prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog), + "GPL", 0, NULL, 0); + ASSERT_GE(prog_fd, 0, "test-run load"); + + err = sigaction(SIGALRM, &sigalrm_action, NULL); + ASSERT_OK(err, "test-run-signal-sigaction"); + + err = setitimer(ITIMER_REAL, &timeo, NULL); + ASSERT_OK(err, "test-run-signal-timer"); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_LE(topts.duration, 500000000 /* 500ms */, + "test-run-signal-duration"); + + signal(SIGALRM, SIG_DFL); +} + +void test_signal_pending(void) +{ + test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER); + test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c new file mode 100644 index 000000000..1374b626a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook +// Copyright (c) 2019 Cloudflare +// Copyright (c) 2020 Isovalent, Inc. +/* + * Test that the socket assign program is able to redirect traffic towards a + * socket, regardless of whether the port or address destination of the traffic + * matches the port. + */ + +#define _GNU_SOURCE +#include <fcntl.h> +#include <signal.h> +#include <stdlib.h> +#include <unistd.h> + +#include "test_progs.h" + +#define BIND_PORT 1234 +#define CONNECT_PORT 4321 +#define TEST_DADDR (0xC0A80203) +#define NS_SELF "/proc/self/ns/net" +#define SERVER_MAP_PATH "/sys/fs/bpf/tc/globals/server_map" + +static const struct timeval timeo_sec = { .tv_sec = 3 }; +static const size_t timeo_optlen = sizeof(timeo_sec); +static int stop, duration; + +static bool +configure_stack(void) +{ + char tc_version[128]; + char tc_cmd[BUFSIZ]; + char *prog; + FILE *tc; + + /* Check whether tc is built with libbpf. */ + tc = popen("tc -V", "r"); + if (CHECK_FAIL(!tc)) + return false; + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + return false; + if (strstr(tc_version, ", libbpf ")) + prog = "test_sk_assign_libbpf.bpf.o"; + else + prog = "test_sk_assign.bpf.o"; + if (CHECK_FAIL(pclose(tc))) + return false; + + /* Move to a new networking namespace */ + if (CHECK_FAIL(unshare(CLONE_NEWNET))) + return false; + + /* Configure necessary links, routes */ + if (CHECK_FAIL(system("ip link set dev lo up"))) + return false; + if (CHECK_FAIL(system("ip route add local default dev lo"))) + return false; + if (CHECK_FAIL(system("ip -6 route add local default dev lo"))) + return false; + + /* Load qdisc, BPF program */ + if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) + return false; + sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf", + "direct-action object-file", prog, + "section tc", + (env.verbosity < VERBOSE_VERY) ? 
" 2>/dev/null" : "verbose"); + if (CHECK(system(tc_cmd), "BPF load failed;", + "run with -vv for more info\n")) + return false; + + return true; +} + +static int +start_server(const struct sockaddr *addr, socklen_t len, int type) +{ + int fd; + + fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + goto out; + if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, + timeo_optlen))) + goto close_out; + if (CHECK_FAIL(bind(fd, addr, len) == -1)) + goto close_out; + if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1)) + goto close_out; + + goto out; +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static int +connect_to_server(const struct sockaddr *addr, socklen_t len, int type) +{ + int fd = -1; + + fd = socket(addr->sa_family, type, 0); + if (CHECK_FAIL(fd == -1)) + goto out; + if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec, + timeo_optlen))) + goto close_out; + if (CHECK_FAIL(connect(fd, addr, len))) + goto close_out; + + goto out; +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static in_port_t +get_port(int fd) +{ + struct sockaddr_storage ss; + socklen_t slen = sizeof(ss); + in_port_t port = 0; + + if (CHECK_FAIL(getsockname(fd, (struct sockaddr *)&ss, &slen))) + return port; + + switch (ss.ss_family) { + case AF_INET: + port = ((struct sockaddr_in *)&ss)->sin_port; + break; + case AF_INET6: + port = ((struct sockaddr_in6 *)&ss)->sin6_port; + break; + default: + CHECK(1, "Invalid address family", "%d\n", ss.ss_family); + } + return port; +} + +static ssize_t +rcv_msg(int srv_client, int type) +{ + char buf[BUFSIZ]; + + if (type == SOCK_STREAM) + return read(srv_client, &buf, sizeof(buf)); + else + return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL); +} + +static int +run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type) +{ + int client = -1, srv_client = -1; + char buf[] = "testing"; + in_port_t port; + int ret = 1; + + client = connect_to_server(addr, len, type); + if (client == -1) { + perror("Cannot connect to server"); + goto out; + } + + if (type == SOCK_STREAM) { + srv_client = accept(server_fd, NULL, NULL); + if (CHECK_FAIL(srv_client == -1)) { + perror("Can't accept connection"); + goto out; + } + } else { + srv_client = server_fd; + } + if (CHECK_FAIL(write(client, buf, sizeof(buf)) != sizeof(buf))) { + perror("Can't write on client"); + goto out; + } + if (CHECK_FAIL(rcv_msg(srv_client, type) != sizeof(buf))) { + perror("Can't read on server"); + goto out; + } + + port = get_port(srv_client); + if (CHECK_FAIL(!port)) + goto out; + /* SOCK_STREAM is connected via accept(), so the server's local address + * will be the CONNECT_PORT rather than the BIND port that corresponds + * to the listen socket. SOCK_DGRAM on the other hand is connectionless + * so we can't really do the same check there; the server doesn't ever + * create a socket with CONNECT_PORT. 
+ */ + if (type == SOCK_STREAM && + CHECK(port != htons(CONNECT_PORT), "Expected", "port %u but got %u", + CONNECT_PORT, ntohs(port))) + goto out; + else if (type == SOCK_DGRAM && + CHECK(port != htons(BIND_PORT), "Expected", + "port %u but got %u", BIND_PORT, ntohs(port))) + goto out; + + ret = 0; +out: + close(client); + if (srv_client != server_fd) + close(srv_client); + if (ret) + WRITE_ONCE(stop, 1); + return ret; +} + +static void +prepare_addr(struct sockaddr *addr, int family, __u16 port, bool rewrite_addr) +{ + struct sockaddr_in *addr4; + struct sockaddr_in6 *addr6; + + switch (family) { + case AF_INET: + addr4 = (struct sockaddr_in *)addr; + memset(addr4, 0, sizeof(*addr4)); + addr4->sin_family = family; + addr4->sin_port = htons(port); + if (rewrite_addr) + addr4->sin_addr.s_addr = htonl(TEST_DADDR); + else + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + break; + case AF_INET6: + addr6 = (struct sockaddr_in6 *)addr; + memset(addr6, 0, sizeof(*addr6)); + addr6->sin6_family = family; + addr6->sin6_port = htons(port); + addr6->sin6_addr = in6addr_loopback; + if (rewrite_addr) + addr6->sin6_addr.s6_addr32[3] = htonl(TEST_DADDR); + break; + default: + fprintf(stderr, "Invalid family %d", family); + } +} + +struct test_sk_cfg { + const char *name; + int family; + struct sockaddr *addr; + socklen_t len; + int type; + bool rewrite_addr; +}; + +#define TEST(NAME, FAMILY, TYPE, REWRITE) \ +{ \ + .name = NAME, \ + .family = FAMILY, \ + .addr = (FAMILY == AF_INET) ? (struct sockaddr *)&addr4 \ + : (struct sockaddr *)&addr6, \ + .len = (FAMILY == AF_INET) ? sizeof(addr4) : sizeof(addr6), \ + .type = TYPE, \ + .rewrite_addr = REWRITE, \ +} + +void test_sk_assign(void) +{ + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + struct test_sk_cfg tests[] = { + TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false), + TEST("ipv4 tcp addr redir", AF_INET, SOCK_STREAM, true), + TEST("ipv6 tcp port redir", AF_INET6, SOCK_STREAM, false), + TEST("ipv6 tcp addr redir", AF_INET6, SOCK_STREAM, true), + TEST("ipv4 udp port redir", AF_INET, SOCK_DGRAM, false), + TEST("ipv4 udp addr redir", AF_INET, SOCK_DGRAM, true), + TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false), + TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true), + }; + __s64 server = -1; + int server_map; + int self_net; + int i; + + self_net = open(NS_SELF, O_RDONLY); + if (CHECK_FAIL(self_net < 0)) { + perror("Unable to open "NS_SELF); + return; + } + + if (!configure_stack()) { + perror("configure_stack"); + goto cleanup; + } + + server_map = bpf_obj_get(SERVER_MAP_PATH); + if (CHECK_FAIL(server_map < 0)) { + perror("Unable to open " SERVER_MAP_PATH); + goto cleanup; + } + + for (i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) { + struct test_sk_cfg *test = &tests[i]; + const struct sockaddr *addr; + const int zero = 0; + int err; + + if (!test__start_subtest(test->name)) + continue; + prepare_addr(test->addr, test->family, BIND_PORT, false); + addr = (const struct sockaddr *)test->addr; + server = start_server(addr, test->len, test->type); + if (server == -1) + goto close; + + err = bpf_map_update_elem(server_map, &zero, &server, BPF_ANY); + if (CHECK_FAIL(err)) { + perror("Unable to update server_map"); + goto close; + } + + /* connect to unbound ports */ + prepare_addr(test->addr, test->family, CONNECT_PORT, + test->rewrite_addr); + if (run_test(server, addr, test->len, test->type)) + goto close; + + close(server); + server = -1; + } + +close: + close(server); + close(server_map); +cleanup: + if 
(CHECK_FAIL(unlink(SERVER_MAP_PATH))) + perror("Unable to unlink " SERVER_MAP_PATH); + if (CHECK_FAIL(setns(self_net, CLONE_NEWNET))) + perror("Failed to setns("NS_SELF")"); + close(self_net); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c new file mode 100644 index 000000000..597d0467a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -0,0 +1,1413 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare +/* + * Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP). + * + * Tests exercise: + * - attaching/detaching/querying programs to BPF_SK_LOOKUP hook, + * - redirecting socket lookup to a socket selected by BPF program, + * - failing a socket lookup on BPF program's request, + * - error scenarios for selecting a socket from BPF program, + * - accessing BPF program context, + * - attaching and running multiple BPF programs. + * + * Tests run in a dedicated network namespace. + */ + +#define _GNU_SOURCE +#include <arpa/inet.h> +#include <assert.h> +#include <errno.h> +#include <error.h> +#include <fcntl.h> +#include <sched.h> +#include <stdio.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +#include <bpf/libbpf.h> +#include <bpf/bpf.h> + +#include "test_progs.h" +#include "bpf_util.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "testing_helpers.h" +#include "test_sk_lookup.skel.h" + +/* External (address, port) pairs the client sends packets to. */ +#define EXT_IP4 "127.0.0.1" +#define EXT_IP6 "fd00::1" +#define EXT_PORT 7007 + +/* Internal (address, port) pairs the server listens/receives at. */ +#define INT_IP4 "127.0.0.2" +#define INT_IP4_V6 "::ffff:127.0.0.2" +#define INT_IP6 "fd00::2" +#define INT_PORT 8008 + +#define IO_TIMEOUT_SEC 3 + +enum server { + SERVER_A = 0, + SERVER_B = 1, + MAX_SERVERS, +}; + +enum { + PROG1 = 0, + PROG2, +}; + +struct inet_addr { + const char *ip; + unsigned short port; +}; + +struct test { + const char *desc; + struct bpf_program *lookup_prog; + struct bpf_program *reuseport_prog; + struct bpf_map *sock_map; + int sotype; + struct inet_addr connect_to; + struct inet_addr listen_at; + enum server accept_on; + bool reuseport_has_conns; /* Add a connected socket to reuseport group */ +}; + +static __u32 duration; /* for CHECK macro */ + +static bool is_ipv6(const char *ip) +{ + return !!strchr(ip, ':'); +} + +static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(reuseport_prog); + if (prog_fd < 0) { + errno = -prog_fd; + return -1; + } + + err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); + if (err) + return -1; + + return 0; +} + +static socklen_t inetaddr_len(const struct sockaddr_storage *addr) +{ + return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) : + addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0); +} + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC }; + int err, family, fd; + + family = is_ipv6(ip) ? 
AF_INET6 : AF_INET; + err = make_sockaddr(family, ip, port, addr, NULL); + if (CHECK(err, "make_address", "failed\n")) + return -1; + + fd = socket(addr->ss_family, sotype, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to make socket"); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) { + log_err("failed to set SNDTIMEO"); + close(fd); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) { + log_err("failed to set RCVTIMEO"); + close(fd); + return -1; + } + + return fd; +} + +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct sockaddr_storage addr = {0}; + const int one = 1; + int err, fd = -1; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */ + if (sotype == SOCK_DGRAM) { + err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IP_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IPV6_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_STREAM) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { + log_err("failed to enable SO_REUSEADDR"); + goto fail; + } + } + + if (reuseport_prog) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { + log_err("failed to enable SO_REUSEPORT"); + goto fail; + } + } + + err = bind(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "bind", "failed\n")) { + log_err("failed to bind listen socket"); + goto fail; + } + + if (sotype == SOCK_STREAM) { + err = listen(fd, SOMAXCONN); + if (CHECK(err, "make_server", "listen")) { + log_err("failed to listen on port %d", port); + goto fail; + } + } + + /* Late attach reuseport prog so we can have one init path */ + if (reuseport_prog) { + err = attach_reuseport(fd, reuseport_prog); + if (CHECK(err, "attach_reuseport", "failed\n")) { + log_err("failed to attach reuseport prog"); + goto fail; + } + } + + return fd; +fail: + close(fd); + return -1; +} + +static int make_client(int sotype, const char *ip, int port) +{ + struct sockaddr_storage addr = {0}; + int err, fd; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + err = connect(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "make_client", "connect")) { + log_err("failed to connect client socket"); + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static __u64 socket_cookie(int fd) +{ + __u64 cookie; + socklen_t cookie_len = sizeof(cookie); + + if (CHECK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len) < 0, + "getsockopt(SO_COOKIE)", "%s\n", strerror(errno))) + return 0; + return cookie; +} + +static int fill_sk_lookup_ctx(struct bpf_sk_lookup *ctx, const char *local_ip, __u16 local_port, + const char *remote_ip, __u16 remote_port) +{ + void *local, *remote; + int err; + + memset(ctx, 0, sizeof(*ctx)); + ctx->local_port = local_port; + 
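+ /* Per the bpf_sk_lookup UAPI, local_port is in host byte order
+ * while remote_port (like the IP addresses) is in network byte
+ * order, hence the htons() on the remote side only.
+ */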
ctx->remote_port = htons(remote_port); + + if (is_ipv6(local_ip)) { + ctx->family = AF_INET6; + local = &ctx->local_ip6[0]; + remote = &ctx->remote_ip6[0]; + } else { + ctx->family = AF_INET; + local = &ctx->local_ip4; + remote = &ctx->remote_ip4; + } + + err = inet_pton(ctx->family, local_ip, local); + if (CHECK(err != 1, "inet_pton", "local_ip failed\n")) + return 1; + + err = inet_pton(ctx->family, remote_ip, remote); + if (CHECK(err != 1, "inet_pton", "remote_ip failed\n")) + return 1; + + return 0; +} + +static int send_byte(int fd) +{ + ssize_t n; + + errno = 0; + n = send(fd, "a", 1, 0); + if (CHECK(n <= 0, "send_byte", "send")) { + log_err("failed/partial send"); + return -1; + } + return 0; +} + +static int recv_byte(int fd) +{ + char buf[1]; + ssize_t n; + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv_byte", "recv")) { + log_err("failed/partial recv"); + return -1; + } + return 0; +} + +static int tcp_recv_send(int server_fd) +{ + char buf[1]; + int ret, fd; + ssize_t n; + + fd = accept(server_fd, NULL, NULL); + if (CHECK(fd < 0, "accept", "failed\n")) { + log_err("failed to accept"); + return -1; + } + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv", "failed\n")) { + log_err("failed/partial recv"); + ret = -1; + goto close; + } + + n = send(fd, buf, n, 0); + if (CHECK(n <= 0, "send", "failed\n")) { + log_err("failed/partial send"); + ret = -1; + goto close; + } + + ret = 0; +close: + close(fd); + return ret; +} + +static void v4_to_v6(struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss; + struct sockaddr_in v4 = *(struct sockaddr_in *)ss; + + v6->sin6_family = AF_INET6; + v6->sin6_port = v4.sin_port; + v6->sin6_addr.s6_addr[10] = 0xff; + v6->sin6_addr.s6_addr[11] = 0xff; + memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); + memset(&v6->sin6_addr.s6_addr[0], 0, 10); +} + +static int udp_recv_send(int server_fd) +{ + char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))]; + struct sockaddr_storage _src_addr = { 0 }; + struct sockaddr_storage *src_addr = &_src_addr; + struct sockaddr_storage *dst_addr = NULL; + struct msghdr msg = { 0 }; + struct iovec iov = { 0 }; + struct cmsghdr *cm; + char buf[1]; + int ret, fd; + ssize_t n; + + iov.iov_base = buf; + iov.iov_len = sizeof(buf); + + msg.msg_name = src_addr; + msg.msg_namelen = sizeof(*src_addr); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = cmsg_buf; + msg.msg_controllen = sizeof(cmsg_buf); + + errno = 0; + n = recvmsg(server_fd, &msg, 0); + if (CHECK(n <= 0, "recvmsg", "failed\n")) { + log_err("failed to receive"); + return -1; + } + if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n")) + return -1; + + for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { + if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_ORIGDSTADDR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_ORIGDSTADDR)) { + dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm); + break; + } + log_err("warning: ignored cmsg at level %d type %d", + cm->cmsg_level, cm->cmsg_type); + } + if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n")) + return -1; + + /* Server socket bound to IPv4-mapped IPv6 address */ + if (src_addr->ss_family == AF_INET6 && + dst_addr->ss_family == AF_INET) { + v4_to_v6(dst_addr); + } + + /* Reply from original destination address. 
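The server socket may be bound to a different (internal) address than the
one the client sent to, so a separate socket bound to the ORIGDSTADDR from
the cmsg is used to send the reply from the expected source address.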
*/ + fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to create tx socket"); + return -1; + } + + ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); + if (CHECK(ret, "bind", "failed\n")) { + log_err("failed to bind tx socket"); + goto out; + } + + msg.msg_control = NULL; + msg.msg_controllen = 0; + n = sendmsg(fd, &msg, 0); + if (CHECK(n <= 0, "sendmsg", "failed\n")) { + log_err("failed to send echo reply"); + ret = -1; + goto out; + } + + ret = 0; +out: + close(fd); + return ret; +} + +static int tcp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = tcp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static int udp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = udp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static struct bpf_link *attach_lookup_prog(struct bpf_program *prog) +{ + struct bpf_link *link; + int net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return NULL; + } + + link = bpf_program__attach_netns(prog, net_fd); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_netns")) { + errno = -PTR_ERR(link); + log_err("failed to attach program '%s' to netns", + bpf_program__name(prog)); + link = NULL; + } + + close(net_fd); + return link; +} + +static int update_lookup_map(struct bpf_map *map, int index, int sock_fd) +{ + int err, map_fd; + uint64_t value; + + map_fd = bpf_map__fd(map); + if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) { + errno = -map_fd; + log_err("failed to get map FD"); + return -1; + } + + value = (uint64_t)sock_fd; + err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) { + log_err("failed to update redir_map @ %d", index); + return -1; + } + + return 0; +} + +static void query_lookup_prog(struct test_sk_lookup *skel) +{ + struct bpf_link *link[3] = {}; + struct bpf_link_info info; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 prog_id; + int net_fd; + int err; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return; + } + + link[0] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[0]) + goto close; + link[1] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[1]) + goto detach; + link[2] = attach_lookup_prog(skel->progs.lookup_drop); + if (!link[2]) + goto detach; + + err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + if (CHECK(err, "bpf_prog_query", "failed\n")) { + log_err("failed to query lookup prog"); + goto detach; + } + + errno = 0; + if (CHECK(attach_flags != 0, "bpf_prog_query", + "wrong attach_flags on query: %u", attach_flags)) + goto detach; + if (CHECK(prog_cnt != 3, "bpf_prog_query", + "wrong program count on query: %u", prog_cnt)) + goto detach; + prog_id = link_info_prog_id(link[0], &info); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + prog_id = 
link_info_prog_id(link[1], &info); + CHECK(prog_ids[1] != prog_id, "bpf_prog_query", + "invalid program #1 id on query: %u != %u\n", + prog_ids[1], prog_id); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + prog_id = link_info_prog_id(link[2], &info); + CHECK(prog_ids[2] != prog_id, "bpf_prog_query", + "invalid program #2 id on query: %u != %u\n", + prog_ids[2], prog_id); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + + err = bpf_link__detach(link[0]); + if (CHECK(err, "link_detach", "failed %d\n", err)) + goto detach; + + /* prog id is still there, but netns_ino is zeroed out */ + prog_id = link_info_prog_id(link[0], &info); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + CHECK(info.netns.netns_ino != 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + +detach: + if (link[2]) + bpf_link__destroy(link[2]); + if (link[1]) + bpf_link__destroy(link[1]); + if (link[0]) + bpf_link__destroy(link[0]); +close: + close(net_fd); +} + +static void run_lookup_prog(const struct test *t) +{ + int server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 }; + int client_fd, reuse_conn_fd = -1; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (server_fds[i] < 0) + goto close; + + err = update_lookup_map(t->sock_map, i, server_fds[i]); + if (err) + goto close; + + /* want just one server for non-reuseport test */ + if (!t->reuseport_prog) + break; + } + + /* Regular UDP socket lookup with reuseport behaves + * differently when reuseport group contains connected + * sockets. Check that adding a connected UDP socket to the + * reuseport group does not affect how reuseport works with + * BPF socket lookup. 
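+ * (Once the group contains a connected socket, the regular UDP lookup
+ * keeps scoring individual sockets so a 4-tuple match can win over the
+ * group's selection; the redirected lookup must still honor the choice
+ * made by the reuseport BPF program.)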
+ */ + if (t->reuseport_has_conns) { + struct sockaddr_storage addr = {}; + socklen_t len = sizeof(addr); + + /* Add an extra socket to reuseport group */ + reuse_conn_fd = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (reuse_conn_fd < 0) + goto close; + + /* Connect the extra socket to itself */ + err = getsockname(reuse_conn_fd, (void *)&addr, &len); + if (CHECK(err, "getsockname", "errno %d\n", errno)) + goto close; + err = connect(reuse_conn_fd, (void *)&addr, len); + if (CHECK(err, "connect", "errno %d\n", errno)) + goto close; + } + + client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); + if (client_fd < 0) + goto close; + + if (t->sotype == SOCK_STREAM) + tcp_echo_test(client_fd, server_fds[t->accept_on]); + else + udp_echo_test(client_fd, server_fds[t->accept_on]); + + close(client_fd); +close: + if (reuse_conn_fd != -1) + close(reuse_conn_fd); + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void test_redirect_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "TCP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "TCP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4_V6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv4 
redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "UDP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv4 redir and reuseport with conns", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + .reuseport_has_conns = true, + }, + { + .desc = "UDP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .listen_at = { INT_IP4_V6, INT_PORT }, + .connect_to = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 redir and reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv6 redir and reuseport with conns", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + .reuseport_has_conns = true, + }, + { + .desc = "UDP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + run_lookup_prog(t); + } +} + +static void drop_on_lookup(const struct test *t) +{ + struct sockaddr_storage dst = {}; + int client_fd, server_fd, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server_fd < 0) + goto 
detach; + + client_fd = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client_fd < 0) + goto close_srv; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client_fd); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client_fd, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client_fd); +close_srv: + close(server_fd); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + /* The program will drop on success, meaning that the ifindex + * was 1. + */ + { + .desc = "TCP IPv4 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on valid ifindex", + .lookup_prog = skel->progs.check_ifindex, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_lookup(t); + } +} + +static void drop_on_reuseport(const struct test *t) +{ + struct sockaddr_storage dst = { 0 }; + int client, server1, server2, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server1 < 0) + goto detach; + + err = update_lookup_map(t->sock_map, SERVER_A, server1); + if (err) + goto detach; + + /* second server on destination address we should never reach */ + server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, + NULL /* reuseport prog */); + if (server2 < 0) + goto close_srv1; + + client = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client < 0) + goto close_srv2; + + err = connect(client, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client, NULL, 0, 0); + err = n 
== -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client); +close_srv2: + close(server2); +close_srv1: + close(server1); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_reuseport(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_reuseport(t); + } +} + +static void run_sk_assign(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog, + const char *remote_ip, const char *local_ip) +{ + int server_fds[] = { [0 ... 
MAX_SERVERS - 1] = -1 }; + struct bpf_sk_lookup ctx; + __u64 server_cookie; + int i, err; + + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .ctx_in = &ctx, + .ctx_size_in = sizeof(ctx), + .ctx_out = &ctx, + .ctx_size_out = sizeof(ctx), + ); + + if (fill_sk_lookup_ctx(&ctx, local_ip, EXT_PORT, remote_ip, INT_PORT)) + return; + + ctx.protocol = IPPROTO_TCP; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(SOCK_STREAM, local_ip, 0, NULL); + if (server_fds[i] < 0) + goto close_servers; + + err = update_lookup_map(skel->maps.redir_map, i, + server_fds[i]); + if (err) + goto close_servers; + } + + server_cookie = socket_cookie(server_fds[SERVER_B]); + if (!server_cookie) + return; + + err = bpf_prog_test_run_opts(bpf_program__fd(lookup_prog), &opts); + if (CHECK(err, "test_run", "failed with error %d\n", errno)) + goto close_servers; + + if (CHECK(ctx.cookie == 0, "ctx.cookie", "no socket selected\n")) + goto close_servers; + + CHECK(ctx.cookie != server_cookie, "ctx.cookie", + "selected sk %llu instead of %llu\n", ctx.cookie, server_cookie); + +close_servers: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } +} + +static void run_sk_assign_v4(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP4, EXT_IP4); +} + +static void run_sk_assign_v6(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6); +} + +static void run_sk_assign_connected(struct test_sk_lookup *skel, + int sotype) +{ + int err, client_fd, connected_fd, server_fd; + struct bpf_link *lookup_link; + + server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL); + if (server_fd < 0) + return; + + connected_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (connected_fd < 0) + goto out_close_server; + + /* Put a connected socket in redirect map */ + err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd); + if (err) + goto out_close_connected; + + lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport); + if (!lookup_link) + goto out_close_connected; + + /* Try to redirect TCP SYN / UDP packet to a connected socket */ + client_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (client_fd < 0) + goto out_unlink_prog; + if (sotype == SOCK_DGRAM) { + send_byte(client_fd); + recv_byte(server_fd); + } + + close(client_fd); +out_unlink_prog: + bpf_link__destroy(lookup_link); +out_close_connected: + close(connected_fd); +out_close_server: + close(server_fd); +} + +static void test_sk_assign_helper(struct test_sk_lookup *skel) +{ + if (test__start_subtest("sk_assign returns EEXIST")) + run_sk_assign_v4(skel, skel->progs.sk_assign_eexist); + if (test__start_subtest("sk_assign honors F_REPLACE")) + run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag); + if (test__start_subtest("sk_assign accepts NULL socket")) + run_sk_assign_v4(skel, skel->progs.sk_assign_null); + if (test__start_subtest("access ctx->sk")) + run_sk_assign_v4(skel, skel->progs.access_ctx_sk); + if (test__start_subtest("narrow access to ctx v4")) + run_sk_assign_v4(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("narrow access to ctx v6")) + run_sk_assign_v6(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("sk_assign rejects TCP established")) + run_sk_assign_connected(skel, SOCK_STREAM); + if (test__start_subtest("sk_assign rejects UDP connected")) + run_sk_assign_connected(skel, SOCK_DGRAM); +} + +struct 
test_multi_prog { + const char *desc; + struct bpf_program *prog1; + struct bpf_program *prog2; + struct bpf_map *redir_map; + struct bpf_map *run_map; + int expect_errno; + struct inet_addr listen_at; +}; + +static void run_multi_prog_lookup(const struct test_multi_prog *t) +{ + struct sockaddr_storage dst = {}; + int map_fd, server_fd, client_fd; + struct bpf_link *link1, *link2; + int prog_idx, done, err; + + map_fd = bpf_map__fd(t->run_map); + + done = 0; + prog_idx = PROG1; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + prog_idx = PROG2; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + + link1 = attach_lookup_prog(t->prog1); + if (!link1) + return; + link2 = attach_lookup_prog(t->prog2); + if (!link2) + goto out_unlink1; + + server_fd = make_server(SOCK_STREAM, t->listen_at.ip, + t->listen_at.port, NULL); + if (server_fd < 0) + goto out_unlink2; + + err = update_lookup_map(t->redir_map, SERVER_A, server_fd); + if (err) + goto out_close_server; + + client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst); + if (client_fd < 0) + goto out_close_server; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (CHECK(err && !t->expect_errno, "connect", + "unexpected error %d\n", errno)) + goto out_close_client; + if (CHECK(err && t->expect_errno && errno != t->expect_errno, + "connect", "unexpected error %d\n", errno)) + goto out_close_client; + + done = 0; + prog_idx = PROG1; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n"); + + done = 0; + prog_idx = PROG2; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n"); + +out_close_client: + close(client_fd); +out_close_server: + close(server_fd); +out_unlink2: + bpf_link__destroy(link2); +out_unlink1: + bpf_link__destroy(link1); +} + +static void test_multi_prog_lookup(struct test_sk_lookup *skel) +{ + struct test_multi_prog tests[] = { + { + .desc = "multi prog - pass, pass", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "multi prog - drop, drop", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, drop", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - drop, pass", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, redir", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, pass", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - drop, redir", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, drop", + .prog1 = skel->progs.multi_prog_redir1, 
+ .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, redir", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + }; + struct test_multi_prog *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + t->redir_map = skel->maps.redir_map; + t->run_map = skel->maps.run_map; + if (test__start_subtest(t->desc)) + run_multi_prog_lookup(t); + } +} + +static void run_tests(struct test_sk_lookup *skel) +{ + if (test__start_subtest("query lookup prog")) + query_lookup_prog(skel); + test_redirect_lookup(skel); + test_drop_on_lookup(skel); + test_drop_on_reuseport(skel); + test_sk_assign_helper(skel); + test_multi_prog_lookup(skel); +} + +static int switch_netns(void) +{ + static const char * const setup_script[] = { + "ip -6 addr add dev lo " EXT_IP6 "/128", + "ip -6 addr add dev lo " INT_IP6 "/128", + "ip link set dev lo up", + NULL, + }; + const char * const *cmd; + int err; + + err = unshare(CLONE_NEWNET); + if (CHECK(err, "unshare", "failed\n")) { + log_err("unshare(CLONE_NEWNET)"); + return -1; + } + + for (cmd = setup_script; *cmd; cmd++) { + err = system(*cmd); + if (CHECK(err, "system", "failed\n")) { + log_err("system(%s)", *cmd); + return -1; + } + } + + return 0; +} + +void test_sk_lookup(void) +{ + struct test_sk_lookup *skel; + int err; + + err = switch_netns(); + if (err) + return; + + skel = test_sk_lookup__open_and_load(); + if (CHECK(!skel, "skel open_and_load", "failed\n")) + return; + + run_tests(skel); + + test_sk_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c new file mode 100644 index 000000000..547ae53cd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <sys/types.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include "test_progs.h" +#include "network_helpers.h" +#include "test_sk_storage_trace_itself.skel.h" +#include "test_sk_storage_tracing.skel.h" + +#define LO_ADDR6 "::1" +#define TEST_COMM "test_progs" + +struct sk_stg { + __u32 pid; + __u32 last_notclose_state; + char comm[16]; +}; + +static struct test_sk_storage_tracing *skel; +static __u32 duration; +static pid_t my_pid; + +static int check_sk_stg(int sk_fd, __u32 expected_state) +{ + struct sk_stg sk_stg; + int err; + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_stg_map), &sk_fd, + &sk_stg); + if (!ASSERT_OK(err, "map_lookup(sk_stg_map)")) + return -1; + + if (!ASSERT_EQ(sk_stg.last_notclose_state, expected_state, + "last_notclose_state")) + return -1; + + if (!ASSERT_EQ(sk_stg.pid, my_pid, "pid")) + return -1; + + if (!ASSERT_STREQ(sk_stg.comm, skel->bss->task_comm, "task_comm")) + return -1; + + return 0; +} + +static void do_test(void) +{ + int listen_fd = -1, passive_fd = -1, active_fd = -1, value = 1, err; + char abyte; + + listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0); + if (CHECK(listen_fd == -1, "start_server", + "listen_fd:%d errno:%d\n", listen_fd, errno)) + return; + + active_fd = connect_to_fd(listen_fd, 0); + if (CHECK(active_fd == -1, "connect_to_fd", "active_fd:%d errno:%d\n", + active_fd, errno)) + goto out; + + err = bpf_map_update_elem(bpf_map__fd(skel->maps.del_sk_stg_map), + &active_fd, &value, 0); + if (!ASSERT_OK(err, "map_update(del_sk_stg_map)")) + goto out; + + passive_fd = 
accept(listen_fd, NULL, 0); + if (CHECK(passive_fd == -1, "accept", "passive_fd:%d errno:%d\n", + passive_fd, errno)) + goto out; + + shutdown(active_fd, SHUT_WR); + err = read(passive_fd, &abyte, 1); + if (!ASSERT_OK(err, "read(passive_fd)")) + goto out; + + shutdown(passive_fd, SHUT_WR); + err = read(active_fd, &abyte, 1); + if (!ASSERT_OK(err, "read(active_fd)")) + goto out; + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.del_sk_stg_map), + &active_fd, &value); + if (!ASSERT_ERR(err, "map_lookup(del_sk_stg_map)")) + goto out; + + err = check_sk_stg(listen_fd, BPF_TCP_LISTEN); + if (!ASSERT_OK(err, "listen_fd sk_stg")) + goto out; + + err = check_sk_stg(active_fd, BPF_TCP_FIN_WAIT2); + if (!ASSERT_OK(err, "active_fd sk_stg")) + goto out; + + err = check_sk_stg(passive_fd, BPF_TCP_LAST_ACK); + ASSERT_OK(err, "passive_fd sk_stg"); + +out: + if (active_fd != -1) + close(active_fd); + if (passive_fd != -1) + close(passive_fd); + if (listen_fd != -1) + close(listen_fd); +} + +void serial_test_sk_storage_tracing(void) +{ + struct test_sk_storage_trace_itself *skel_itself; + int err; + + my_pid = getpid(); + + skel_itself = test_sk_storage_trace_itself__open_and_load(); + + if (!ASSERT_NULL(skel_itself, "test_sk_storage_trace_itself")) { + test_sk_storage_trace_itself__destroy(skel_itself); + return; + } + + skel = test_sk_storage_tracing__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_sk_storage_tracing")) + return; + + err = test_sk_storage_tracing__attach(skel); + if (!ASSERT_OK(err, "test_sk_storage_tracing__attach")) { + test_sk_storage_tracing__destroy(skel); + return; + } + + do_test(); + + test_sk_storage_tracing__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c new file mode 100644 index 000000000..33f950e2d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void test_skb_ctx(void) +{ + struct __sk_buff skb = { + .cb[0] = 1, + .cb[1] = 2, + .cb[2] = 3, + .cb[3] = 4, + .cb[4] = 5, + .priority = 6, + .ingress_ifindex = 11, + .ifindex = 1, + .tstamp = 7, + .wire_len = 100, + .gso_segs = 8, + .mark = 9, + .gso_size = 10, + .hwtstamp = 11, + }; + LIBBPF_OPTS(bpf_test_run_opts, tattr, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + .ctx_out = &skb, + .ctx_size_out = sizeof(skb), + ); + struct bpf_object *obj; + int err, prog_fd, i; + + err = bpf_prog_test_load("./test_skb_ctx.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (!ASSERT_OK(err, "load")) + return; + + /* ctx_in != NULL, ctx_size_in == 0 */ + + tattr.ctx_size_in = 0; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "ctx_size_in"); + tattr.ctx_size_in = sizeof(skb); + + /* ctx_out != NULL, ctx_size_out == 0 */ + + tattr.ctx_size_out = 0; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "ctx_size_out"); + tattr.ctx_size_out = sizeof(skb); + + /* non-zero [len, tc_index] fields should be rejected*/ + + skb.len = 1; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "len"); + skb.len = 0; + + skb.tc_index = 1; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "tc_index"); + skb.tc_index = 0; + + /* non-zero [hash, sk] fields should be rejected */ + + skb.hash = 1; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "hash"); + skb.hash = 0; + + skb.sk = 
(struct bpf_sock *)1; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_NEQ(err, 0, "sk"); + skb.sk = 0; + + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_OK(err, "test_run"); + ASSERT_OK(tattr.retval, "test_run retval"); + ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out"); + + for (i = 0; i < 5; i++) + ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb"); + ASSERT_EQ(skb.priority, 7, "ctx_out_priority"); + ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex"); + ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex"); + ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp"); + ASSERT_EQ(skb.mark, 10, "ctx_out_mark"); + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c new file mode 100644 index 000000000..f7ee25f29 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void test_skb_helpers(void) +{ + struct __sk_buff skb = { + .wire_len = 100, + .gso_segs = 8, + .gso_size = 10, + }; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + .ctx_out = &skb, + .ctx_size_out = sizeof(skb), + ); + struct bpf_object *obj; + int err, prog_fd; + + err = bpf_prog_test_load("./test_skb_helpers.bpf.o", + BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (!ASSERT_OK(err, "load")) + return; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c new file mode 100644 index 000000000..d7f83c0a4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "skb_load_bytes.skel.h" + +void test_skb_load_bytes(void) +{ + struct skb_load_bytes *skel; + int err, prog_fd, test_result; + struct __sk_buff skb = { 0 }; + + LIBBPF_OPTS(bpf_test_run_opts, tattr, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + + skel = skb_load_bytes__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + prog_fd = bpf_program__fd(skel->progs.skb_process); + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) + goto out; + + skel->bss->load_offset = (uint32_t)(-1); + err = bpf_prog_test_run_opts(prog_fd, &tattr); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto out; + test_result = skel->bss->test_result; + if (!ASSERT_EQ(test_result, -EFAULT, "offset -1")) + goto out; + + skel->bss->load_offset = (uint32_t)10; + err = bpf_prog_test_run_opts(prog_fd, &tattr); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto out; + test_result = skel->bss->test_result; + if (!ASSERT_EQ(test_result, 0, "offset 10")) + goto out; + +out: + skb_load_bytes__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c new file mode 100644 index 000000000..3eefdfed1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Hengqi Chen */ + +#include <test_progs.h> +#include <sys/un.h> +#include "test_skc_to_unix_sock.skel.h" + +static const char *sock_path = 
"@skc_to_unix_sock"; + +void test_skc_to_unix_sock(void) +{ + struct test_skc_to_unix_sock *skel; + struct sockaddr_un sockaddr; + int err, sockfd = 0; + + skel = test_skc_to_unix_sock__open(); + if (!ASSERT_OK_PTR(skel, "could not open BPF object")) + return; + + skel->rodata->my_pid = getpid(); + + err = test_skc_to_unix_sock__load(skel); + if (!ASSERT_OK(err, "could not load BPF object")) + goto cleanup; + + err = test_skc_to_unix_sock__attach(skel); + if (!ASSERT_OK(err, "could not attach BPF object")) + goto cleanup; + + /* trigger unix_listen */ + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!ASSERT_GT(sockfd, 0, "socket failed")) + goto cleanup; + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_UNIX; + strncpy(sockaddr.sun_path, sock_path, strlen(sock_path)); + sockaddr.sun_path[0] = '\0'; + + err = bind(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)); + if (!ASSERT_OK(err, "bind failed")) + goto cleanup; + + err = listen(sockfd, 1); + if (!ASSERT_OK(err, "listen failed")) + goto cleanup; + + ASSERT_EQ(strcmp(skel->bss->path, sock_path), 0, "bpf_skc_to_unix_sock failed"); + +cleanup: + if (sockfd) + close(sockfd); + test_skc_to_unix_sock__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c new file mode 100644 index 000000000..99dac5292 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> + +struct s { + int a; + long long b; +} __attribute__((packed)); + +#include "test_skeleton.skel.h" + +void test_skeleton(void) +{ + int duration = 0, err; + struct test_skeleton* skel; + struct test_skeleton__bss *bss; + struct test_skeleton__data *data; + struct test_skeleton__data_dyn *data_dyn; + struct test_skeleton__rodata *rodata; + struct test_skeleton__rodata_dyn *rodata_dyn; + struct test_skeleton__kconfig *kcfg; + const void *elf_bytes; + size_t elf_bytes_sz = 0; + int i; + + skel = test_skeleton__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + if (CHECK(skel->kconfig, "skel_kconfig", "kconfig is mmaped()!\n")) + goto cleanup; + + bss = skel->bss; + data = skel->data; + data_dyn = skel->data_dyn; + rodata = skel->rodata; + rodata_dyn = skel->rodata_dyn; + + ASSERT_STREQ(bpf_map__name(skel->maps.rodata_dyn), ".rodata.dyn", "rodata_dyn_name"); + ASSERT_STREQ(bpf_map__name(skel->maps.data_dyn), ".data.dyn", "data_dyn_name"); + + /* validate values are pre-initialized correctly */ + CHECK(data->in1 != -1, "in1", "got %d != exp %d\n", data->in1, -1); + CHECK(data->out1 != -1, "out1", "got %d != exp %d\n", data->out1, -1); + CHECK(data->in2 != -1, "in2", "got %lld != exp %lld\n", data->in2, -1LL); + CHECK(data->out2 != -1, "out2", "got %lld != exp %lld\n", data->out2, -1LL); + + CHECK(bss->in3 != 0, "in3", "got %d != exp %d\n", bss->in3, 0); + CHECK(bss->out3 != 0, "out3", "got %d != exp %d\n", bss->out3, 0); + CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); + CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); + + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); + CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); + + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 0, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], -(i + 1), "in_dynarr"); + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 1, 
"out_dynarr"); + + /* validate we can pre-setup global variables, even in .bss */ + data->in1 = 10; + data->in2 = 11; + bss->in3 = 12; + bss->in4 = 13; + rodata->in.in6 = 14; + + rodata_dyn->in_dynarr_sz = 4; + for (i = 0; i < 4; i++) + rodata_dyn->in_dynarr[i] = i + 10; + + err = test_skeleton__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + /* validate pre-setup values are still there */ + CHECK(data->in1 != 10, "in1", "got %d != exp %d\n", data->in1, 10); + CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); + CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); + CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); + + ASSERT_EQ(rodata_dyn->in_dynarr_sz, 4, "in_dynarr_sz"); + for (i = 0; i < 4; i++) + ASSERT_EQ(rodata_dyn->in_dynarr[i], i + 10, "in_dynarr"); + + /* now set new values and attach to get them into outX variables */ + data->in1 = 1; + data->in2 = 2; + bss->in3 = 3; + bss->in4 = 4; + bss->in5.a = 5; + bss->in5.b = 6; + kcfg = skel->kconfig; + + skel->data_read_mostly->read_mostly_var = 123; + + err = test_skeleton__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + CHECK(data->out1 != 1, "res1", "got %d != exp %d\n", data->out1, 1); + CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2); + CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3); + CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4); + CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5); + CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6); + CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14); + + CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1", + "got %d != exp %d\n", bss->bpf_syscall, kcfg->CONFIG_BPF_SYSCALL); + CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2", + "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION); + + for (i = 0; i < 4; i++) + ASSERT_EQ(data_dyn->out_dynarr[i], i + 10, "out_dynarr"); + + ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var"); + + ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr"); + + elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz); + ASSERT_OK_PTR(elf_bytes, "elf_bytes"); + ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz"); + +cleanup: + test_skeleton__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c new file mode 100644 index 000000000..4be6fdb78 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Google LLC. 
*/ + +#include <test_progs.h> +#include "test_snprintf.skel.h" +#include "test_snprintf_single.skel.h" + +#define EXP_NUM_OUT "-8 9 96 -424242 1337 DABBAD00" +#define EXP_NUM_RET sizeof(EXP_NUM_OUT) + +#define EXP_IP_OUT "127.000.000.001 0000:0000:0000:0000:0000:0000:0000:0001" +#define EXP_IP_RET sizeof(EXP_IP_OUT) + +/* The third specifier, %pB, depends on compiler inlining so don't check it */ +#define EXP_SYM_OUT "schedule schedule+0x0/" +#define MIN_SYM_RET sizeof(EXP_SYM_OUT) + +/* The third specifier, %p, is a hashed pointer which changes on every reboot */ +#define EXP_ADDR_OUT "0000000000000000 ffff00000add4e55 " +#define EXP_ADDR_RET sizeof(EXP_ADDR_OUT "unknownhashedptr") + +#define EXP_STR_OUT "str1 a b c d e longstr" +#define EXP_STR_RET sizeof(EXP_STR_OUT) + +#define EXP_OVER_OUT "%over" +#define EXP_OVER_RET 10 + +#define EXP_PAD_OUT " 4 000" +#define EXP_PAD_RET 900007 + +#define EXP_NO_ARG_OUT "simple case" +#define EXP_NO_ARG_RET 12 + +#define EXP_NO_BUF_RET 29 + +static void test_snprintf_positive(void) +{ + char exp_addr_out[] = EXP_ADDR_OUT; + char exp_sym_out[] = EXP_SYM_OUT; + struct test_snprintf *skel; + + skel = test_snprintf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->pid = getpid(); + + if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + ASSERT_STREQ(skel->bss->num_out, EXP_NUM_OUT, "num_out"); + ASSERT_EQ(skel->bss->num_ret, EXP_NUM_RET, "num_ret"); + + ASSERT_STREQ(skel->bss->ip_out, EXP_IP_OUT, "ip_out"); + ASSERT_EQ(skel->bss->ip_ret, EXP_IP_RET, "ip_ret"); + + ASSERT_OK(memcmp(skel->bss->sym_out, exp_sym_out, + sizeof(exp_sym_out) - 1), "sym_out"); + ASSERT_LT(MIN_SYM_RET, skel->bss->sym_ret, "sym_ret"); + + ASSERT_OK(memcmp(skel->bss->addr_out, exp_addr_out, + sizeof(exp_addr_out) - 1), "addr_out"); + ASSERT_EQ(skel->bss->addr_ret, EXP_ADDR_RET, "addr_ret"); + + ASSERT_STREQ(skel->bss->str_out, EXP_STR_OUT, "str_out"); + ASSERT_EQ(skel->bss->str_ret, EXP_STR_RET, "str_ret"); + + ASSERT_STREQ(skel->bss->over_out, EXP_OVER_OUT, "over_out"); + ASSERT_EQ(skel->bss->over_ret, EXP_OVER_RET, "over_ret"); + + ASSERT_STREQ(skel->bss->pad_out, EXP_PAD_OUT, "pad_out"); + ASSERT_EQ(skel->bss->pad_ret, EXP_PAD_RET, "pad_ret"); + + ASSERT_STREQ(skel->bss->noarg_out, EXP_NO_ARG_OUT, "no_arg_out"); + ASSERT_EQ(skel->bss->noarg_ret, EXP_NO_ARG_RET, "no_arg_ret"); + + ASSERT_EQ(skel->bss->nobuf_ret, EXP_NO_BUF_RET, "no_buf_ret"); + +cleanup: + test_snprintf__destroy(skel); +} + +/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */ +static int load_single_snprintf(char *fmt) +{ + struct test_snprintf_single *skel; + int ret; + + skel = test_snprintf_single__open(); + if (!skel) + return -EINVAL; + + memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10)); + + ret = test_snprintf_single__load(skel); + test_snprintf_single__destroy(skel); + + return ret; +} + +static void test_snprintf_negative(void) +{ + ASSERT_OK(load_single_snprintf("valid %d"), "valid usage"); + + ASSERT_ERR(load_single_snprintf("0123456789"), "no terminating zero"); + ASSERT_ERR(load_single_snprintf("%d %d"), "too many specifiers"); + ASSERT_ERR(load_single_snprintf("%pi5"), "invalid specifier 1"); + ASSERT_ERR(load_single_snprintf("%a"), "invalid specifier 2"); + ASSERT_ERR(load_single_snprintf("%"), "invalid specifier 3"); + ASSERT_ERR(load_single_snprintf("%12345678"), "invalid specifier 4"); + ASSERT_ERR(load_single_snprintf("%--------"), "invalid specifier 5"); + 
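	/* [Editor's note, not part of the original patch] The remaining negative
	 * cases below check that the %lc and %llc length-modified conversions,
	 * as well as non-ASCII and non-printable bytes anywhere in the format
	 * string, are likewise rejected when the program is loaded.
	 */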
ASSERT_ERR(load_single_snprintf("%lc"), "invalid specifier 6"); + ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7"); + ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character"); + ASSERT_ERR(load_single_snprintf("\x1"), "non printable character"); +} + +void test_snprintf(void) +{ + if (test__start_subtest("snprintf_positive")) + test_snprintf_positive(); + if (test__start_subtest("snprintf_negative")) + test_snprintf_negative(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c new file mode 100644 index 000000000..dd41b826b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <linux/btf.h> +#include "netif_receive_skb.skel.h" + +/* Demonstrate that bpf_snprintf_btf succeeds and that various data types + * are formatted correctly. + */ +void serial_test_snprintf_btf(void) +{ + struct netif_receive_skb *skel; + struct netif_receive_skb__bss *bss; + int err, duration = 0; + + skel = netif_receive_skb__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + err = netif_receive_skb__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + bss = skel->bss; + + err = netif_receive_skb__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* generate receive event */ + err = system("ping -c 1 127.0.0.1 > /dev/null"); + if (CHECK(err, "system", "ping failed: %d\n", err)) + goto cleanup; + + if (bss->skip) { + printf("%s:SKIP:no __builtin_btf_type_id\n", __func__); + test__skip(); + goto cleanup; + } + + /* + * Make sure netif_receive_skb program was triggered + * and it set expected return values from bpf_trace_printk()s + * and all tests ran. + */ + if (!ASSERT_GT(bss->ret, 0, "bpf_snprintf_ret")) + goto cleanup; + + if (CHECK(bss->ran_subtests == 0, "check if subtests ran", + "no subtests ran, did BPF program run?")) + goto cleanup; + + if (CHECK(bss->num_subtests != bss->ran_subtests, + "check all subtests ran", + "only ran %d of %d tests\n", bss->num_subtests, + bss->ran_subtests)) + goto cleanup; + +cleanup: + netif_receive_skb__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c new file mode 100644 index 000000000..7d23166c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#define _GNU_SOURCE +#include <netinet/in.h> +#include <arpa/inet.h> +#include <unistd.h> +#include <sched.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include <linux/compiler.h> + +#include "network_helpers.h" +#include "cgroup_helpers.h" +#include "test_progs.h" +#include "test_sock_fields.skel.h" + +enum bpf_linum_array_idx { + EGRESS_LINUM_IDX, + INGRESS_LINUM_IDX, + READ_SK_DST_PORT_LINUM_IDX, + __NR_BPF_LINUM_ARRAY_IDX, +}; + +struct bpf_spinlock_cnt { + struct bpf_spin_lock lock; + __u32 cnt; +}; + +#define PARENT_CGROUP "/test-bpf-sock-fields" +#define CHILD_CGROUP "/test-bpf-sock-fields/child" +#define DATA "Hello BPF!" 
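/* [Editor's note, not part of the original patch] DATA_LEN below expands to
 * sizeof("Hello BPF!") == 11: sizeof of a string literal includes the
 * terminating NUL, so each send()/recv() in test() moves 11 bytes, which is
 * what the later bytes_acked / bytes_received comparisons against
 * 2 * DATA_LEN rely on.
 */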
+#define DATA_LEN sizeof(DATA) + +static struct sockaddr_in6 srv_sa6, cli_sa6; +static int sk_pkt_out_cnt10_fd; +static struct test_sock_fields *skel; +static int sk_pkt_out_cnt_fd; +static __u64 parent_cg_id; +static __u64 child_cg_id; +static int linum_map_fd; +static __u32 duration; + +static bool create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return false; + + if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo")) + return false; + + return true; +} + +static void print_sk(const struct bpf_sock *sk, const char *prefix) +{ + char src_ip4[24], dst_ip4[24]; + char src_ip6[64], dst_ip6[64]; + + inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4)); + inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6)); + inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4)); + inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6)); + + printf("%s: state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u " + "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u " + "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n", + prefix, + sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol, + sk->mark, sk->priority, + sk->src_ip4, src_ip4, + sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3], + src_ip6, sk->src_port, + sk->dst_ip4, dst_ip4, + sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3], + dst_ip6, ntohs(sk->dst_port)); +} + +static void print_tp(const struct bpf_tcp_sock *tp, const char *prefix) +{ + printf("%s: snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u " + "snd_nxt:%u snd:una:%u mss_cache:%u ecn_flags:%u " + "rate_delivered:%u rate_interval_us:%u packets_out:%u " + "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u " + "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u " + "bytes_received:%llu bytes_acked:%llu\n", + prefix, + tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh, + tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache, + tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us, + tp->packets_out, tp->retrans_out, tp->total_retrans, + tp->segs_in, tp->data_segs_in, tp->segs_out, + tp->data_segs_out, tp->lost_out, tp->sacked_out, + tp->bytes_received, tp->bytes_acked); +} + +static void check_result(void) +{ + struct bpf_tcp_sock srv_tp, cli_tp, listen_tp; + struct bpf_sock srv_sk, cli_sk, listen_sk; + __u32 idx, ingress_linum, egress_linum, linum; + int err; + + idx = EGRESS_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum); + CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)", + "err:%d errno:%d\n", err, errno); + + idx = INGRESS_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum); + CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)", + "err:%d errno:%d\n", err, errno); + + idx = READ_SK_DST_PORT_LINUM_IDX; + err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum); + ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)"); + ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line"); + + memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk)); + memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp)); + memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk)); + memcpy(&cli_tp, &skel->bss->cli_tp, sizeof(cli_tp)); + memcpy(&listen_sk, &skel->bss->listen_sk, sizeof(listen_sk)); + memcpy(&listen_tp, &skel->bss->listen_tp, sizeof(listen_tp)); + + print_sk(&listen_sk, "listen_sk"); + print_sk(&srv_sk, "srv_sk"); + print_sk(&cli_sk, "cli_sk"); + print_tp(&listen_tp, "listen_tp"); + 
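	/* [Editor's sketch, not part of the original patch] The checks below
	 * compare sk->state against the literal 10, which is the LISTEN state
	 * in the kernel's TCP state numbering (BPF_TCP_LISTEN in the UAPI
	 * <linux/bpf.h>). Assuming that header is available, a more
	 * self-documenting form of the same predicate would be:
	 *
	 *	static bool sk_is_listening(const struct bpf_sock *sk)
	 *	{
	 *		return sk->state == BPF_TCP_LISTEN;	// == 10
	 *	}
	 */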
print_tp(&srv_tp, "srv_tp"); + print_tp(&cli_tp, "cli_tp"); + + CHECK(listen_sk.state != 10 || + listen_sk.family != AF_INET6 || + listen_sk.protocol != IPPROTO_TCP || + memcmp(listen_sk.src_ip6, &in6addr_loopback, + sizeof(listen_sk.src_ip6)) || + listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] || + listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] || + listen_sk.src_port != ntohs(srv_sa6.sin6_port) || + listen_sk.dst_port, + "listen_sk", + "Unexpected. Check listen_sk output. ingress_linum:%u\n", + ingress_linum); + + CHECK(srv_sk.state == 10 || + !srv_sk.state || + srv_sk.family != AF_INET6 || + srv_sk.protocol != IPPROTO_TCP || + memcmp(srv_sk.src_ip6, &in6addr_loopback, + sizeof(srv_sk.src_ip6)) || + memcmp(srv_sk.dst_ip6, &in6addr_loopback, + sizeof(srv_sk.dst_ip6)) || + srv_sk.src_port != ntohs(srv_sa6.sin6_port) || + srv_sk.dst_port != cli_sa6.sin6_port, + "srv_sk", "Unexpected. Check srv_sk output. egress_linum:%u\n", + egress_linum); + + CHECK(!skel->bss->lsndtime, "srv_tp", "Unexpected lsndtime:0\n"); + + CHECK(cli_sk.state == 10 || + !cli_sk.state || + cli_sk.family != AF_INET6 || + cli_sk.protocol != IPPROTO_TCP || + memcmp(cli_sk.src_ip6, &in6addr_loopback, + sizeof(cli_sk.src_ip6)) || + memcmp(cli_sk.dst_ip6, &in6addr_loopback, + sizeof(cli_sk.dst_ip6)) || + cli_sk.src_port != ntohs(cli_sa6.sin6_port) || + cli_sk.dst_port != srv_sa6.sin6_port, + "cli_sk", "Unexpected. Check cli_sk output. egress_linum:%u\n", + egress_linum); + + CHECK(listen_tp.data_segs_out || + listen_tp.data_segs_in || + listen_tp.total_retrans || + listen_tp.bytes_acked, + "listen_tp", + "Unexpected. Check listen_tp output. ingress_linum:%u\n", + ingress_linum); + + CHECK(srv_tp.data_segs_out != 2 || + srv_tp.data_segs_in || + srv_tp.snd_cwnd != 10 || + srv_tp.total_retrans || + srv_tp.bytes_acked < 2 * DATA_LEN, + "srv_tp", "Unexpected. Check srv_tp output. egress_linum:%u\n", + egress_linum); + + CHECK(cli_tp.data_segs_out || + cli_tp.data_segs_in != 2 || + cli_tp.snd_cwnd != 10 || + cli_tp.total_retrans || + cli_tp.bytes_received < 2 * DATA_LEN, + "cli_tp", "Unexpected. Check cli_tp output. egress_linum:%u\n", + egress_linum); + + CHECK(skel->bss->parent_cg_id != parent_cg_id, + "parent_cg_id", "%zu != %zu\n", + (size_t)skel->bss->parent_cg_id, (size_t)parent_cg_id); + + CHECK(skel->bss->child_cg_id != child_cg_id, + "child_cg_id", "%zu != %zu\n", + (size_t)skel->bss->child_cg_id, (size_t)child_cg_id); +} + +static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd) +{ + struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {}; + int err; + + pkt_out_cnt.cnt = ~0; + pkt_out_cnt10.cnt = ~0; + err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt); + if (!err) + err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd, + &pkt_out_cnt10); + + /* The bpf prog only counts for fullsock and + * passive connection did not become fullsock until 3WHS + * had been finished, so the bpf prog only counted two data + * packet out. + */ + CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 2 || + pkt_out_cnt10.cnt < 0xeB9F + 20, + "bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)", + "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n", + err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt); + + pkt_out_cnt.cnt = ~0; + pkt_out_cnt10.cnt = ~0; + err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt); + if (!err) + err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd, + &pkt_out_cnt10); + /* Active connection is fullsock from the beginning. + * 1 SYN and 1 ACK during 3WHS + * 2 Acks on data packet. 
+ * + * The bpf_prog initialized it to 0xeB9F. + */ + CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 4 || + pkt_out_cnt10.cnt < 0xeB9F + 40, + "bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)", + "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n", + err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt); +} + +static int init_sk_storage(int sk_fd, __u32 pkt_out_cnt) +{ + struct bpf_spinlock_cnt scnt = {}; + int err; + + scnt.cnt = pkt_out_cnt; + err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt, + BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)", + "err:%d errno:%d\n", err, errno)) + return err; + + err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt, + BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)", + "err:%d errno:%d\n", err, errno)) + return err; + + return 0; +} + +static void test(void) +{ + int listen_fd = -1, cli_fd = -1, accept_fd = -1, err, i; + socklen_t addrlen = sizeof(struct sockaddr_in6); + char buf[DATA_LEN]; + + /* Prepare listen_fd */ + listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0); + /* start_server() has logged the error details */ + if (CHECK_FAIL(listen_fd == -1)) + goto done; + + err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen); + if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err, + errno)) + goto done; + memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6)); + + cli_fd = connect_to_fd(listen_fd, 0); + if (CHECK_FAIL(cli_fd == -1)) + goto done; + + err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen); + if (CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d\n", + err, errno)) + goto done; + + accept_fd = accept(listen_fd, NULL, NULL); + if (CHECK(accept_fd == -1, "accept(listen_fd)", + "accept_fd:%d errno:%d\n", + accept_fd, errno)) + goto done; + + if (init_sk_storage(accept_fd, 0xeB9F)) + goto done; + + for (i = 0; i < 2; i++) { + /* Send some data from accept_fd to cli_fd. + * MSG_EOR to stop kernel from coalescing two pkts. 
+ */ + err = send(accept_fd, DATA, DATA_LEN, MSG_EOR); + if (CHECK(err != DATA_LEN, "send(accept_fd)", + "err:%d errno:%d\n", err, errno)) + goto done; + + err = recv(cli_fd, buf, DATA_LEN, 0); + if (CHECK(err != DATA_LEN, "recv(cli_fd)", "err:%d errno:%d\n", + err, errno)) + goto done; + } + + shutdown(cli_fd, SHUT_WR); + err = recv(accept_fd, buf, 1, 0); + if (CHECK(err, "recv(accept_fd) for fin", "err:%d errno:%d\n", + err, errno)) + goto done; + shutdown(accept_fd, SHUT_WR); + err = recv(cli_fd, buf, 1, 0); + if (CHECK(err, "recv(cli_fd) for fin", "err:%d errno:%d\n", + err, errno)) + goto done; + check_sk_pkt_out_cnt(accept_fd, cli_fd); + check_result(); + +done: + if (accept_fd != -1) + close(accept_fd); + if (cli_fd != -1) + close(cli_fd); + if (listen_fd != -1) + close(listen_fd); +} + +void serial_test_sock_fields(void) +{ + int parent_cg_fd = -1, child_cg_fd = -1; + struct bpf_link *link; + + /* Use a dedicated netns to have a fixed listen port */ + if (!create_netns()) + return; + + /* Create a cgroup, get fd, and join it */ + parent_cg_fd = test__join_cgroup(PARENT_CGROUP); + if (CHECK_FAIL(parent_cg_fd < 0)) + return; + parent_cg_id = get_cgroup_id(PARENT_CGROUP); + if (CHECK_FAIL(!parent_cg_id)) + goto done; + + child_cg_fd = test__join_cgroup(CHILD_CGROUP); + if (CHECK_FAIL(child_cg_fd < 0)) + goto done; + child_cg_id = get_cgroup_id(CHILD_CGROUP); + if (CHECK_FAIL(!child_cg_id)) + goto done; + + skel = test_sock_fields__open_and_load(); + if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n")) + goto done; + + link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)")) + goto done; + skel->links.egress_read_sock_fields = link; + + link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)")) + goto done; + skel->links.ingress_read_sock_fields = link; + + link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port")) + goto done; + skel->links.read_sk_dst_port = link; + + linum_map_fd = bpf_map__fd(skel->maps.linum_map); + sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt); + sk_pkt_out_cnt10_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt10); + + test(); + +done: + test_sock_fields__destroy(skel); + if (child_cg_fd >= 0) + close(child_cg_fd); + if (parent_cg_fd >= 0) + close(parent_cg_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/socket_cookie.c b/tools/testing/selftests/bpf/prog_tests/socket_cookie.c new file mode 100644 index 000000000..232db28dd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/socket_cookie.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Google LLC. 
+// Copyright (c) 2018 Facebook + +#include <test_progs.h> +#include "socket_cookie_prog.skel.h" +#include "network_helpers.h" + +static int duration; + +struct socket_cookie { + __u64 cookie_key; + __u32 cookie_value; +}; + +void test_socket_cookie(void) +{ + int server_fd = 0, client_fd = 0, cgroup_fd = 0, err = 0; + socklen_t addr_len = sizeof(struct sockaddr_in6); + struct socket_cookie_prog *skel; + __u32 cookie_expected_value; + struct sockaddr_in6 addr; + struct socket_cookie val; + + skel = socket_cookie_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + cgroup_fd = test__join_cgroup("/socket_cookie"); + if (CHECK(cgroup_fd < 0, "join_cgroup", "cgroup creation failed\n")) + goto out; + + skel->links.set_cookie = bpf_program__attach_cgroup( + skel->progs.set_cookie, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach")) + goto close_cgroup_fd; + + skel->links.update_cookie_sockops = bpf_program__attach_cgroup( + skel->progs.update_cookie_sockops, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach")) + goto close_cgroup_fd; + + skel->links.update_cookie_tracing = bpf_program__attach( + skel->progs.update_cookie_tracing); + if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach")) + goto close_cgroup_fd; + + server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); + if (CHECK(server_fd < 0, "start_server", "errno %d\n", errno)) + goto close_cgroup_fd; + + client_fd = connect_to_fd(server_fd, 0); + if (CHECK(client_fd < 0, "connect_to_fd", "errno %d\n", errno)) + goto close_server_fd; + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies), + &client_fd, &val); + if (!ASSERT_OK(err, "map_lookup(socket_cookies)")) + goto close_client_fd; + + err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len); + if (!ASSERT_OK(err, "getsockname")) + goto close_client_fd; + + cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF; + ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value"); + +close_client_fd: + close(client_fd); +close_server_fd: + close(server_fd); +close_cgroup_fd: + close(cgroup_fd); +out: + socket_cookie_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c new file mode 100644 index 000000000..0aa088900 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Cloudflare +#include <error.h> +#include <netinet/tcp.h> + +#include "test_progs.h" +#include "test_skmsg_load_helpers.skel.h" +#include "test_sockmap_update.skel.h" +#include "test_sockmap_invalid_update.skel.h" +#include "test_sockmap_skb_verdict_attach.skel.h" +#include "test_sockmap_progs_query.skel.h" +#include "bpf_iter_sockmap.skel.h" + +#define TCP_REPAIR 19 /* TCP sock is under repair right now */ + +#define TCP_REPAIR_ON 1 +#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ + +static int connected_socket_v4(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(80), + .sin_addr = { inet_addr("127.0.0.1") }, + }; + socklen_t len = sizeof(addr); + int s, repair, err; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(s, 0, "socket")) + goto error; + + repair = TCP_REPAIR_ON; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)")) + goto error; + + err = connect(s, (struct sockaddr 
*)&addr, len); + if (!ASSERT_OK(err, "connect")) + goto error; + + repair = TCP_REPAIR_OFF_NO_WP; + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); + if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)")) + goto error; + + return s; +error: + perror(__func__); + close(s); + return -1; +} + +static void compare_cookies(struct bpf_map *src, struct bpf_map *dst) +{ + __u32 i, max_entries = bpf_map__max_entries(src); + int err, src_fd, dst_fd; + + src_fd = bpf_map__fd(src); + dst_fd = bpf_map__fd(dst); + + for (i = 0; i < max_entries; i++) { + __u64 src_cookie, dst_cookie; + + err = bpf_map_lookup_elem(src_fd, &i, &src_cookie); + if (err && errno == ENOENT) { + err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie); + ASSERT_ERR(err, "map_lookup_elem(dst)"); + ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)"); + continue; + } + if (!ASSERT_OK(err, "lookup_elem(src)")) + continue; + + err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie); + if (!ASSERT_OK(err, "lookup_elem(dst)")) + continue; + + ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch"); + } +} + +/* Create a map, populate it with one socket, and free the map. */ +static void test_sockmap_create_update_free(enum bpf_map_type map_type) +{ + const int zero = 0; + int s, map, err; + + s = connected_socket_v4(); + if (!ASSERT_GE(s, 0, "connected_socket_v4")) + return; + + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); + if (!ASSERT_GE(map, 0, "bpf_map_create")) + goto out; + + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update")) + goto out; + +out: + close(map); + close(s); +} + +static void test_skmsg_helpers(enum bpf_map_type map_type) +{ + struct test_skmsg_load_helpers *skel; + int err, map, verdict; + + skel = test_skmsg_load_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load")) + return; + + verdict = bpf_program__fd(skel->progs.prog_msg_verdict); + map = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT); + if (!ASSERT_OK(err, "bpf_prog_detach2")) + goto out; +out: + test_skmsg_load_helpers__destroy(skel); +} + +static void test_sockmap_update(enum bpf_map_type map_type) +{ + int err, prog, src; + struct test_sockmap_update *skel; + struct bpf_map *dst_map; + const __u32 zero = 0; + char dummy[14] = {0}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = dummy, + .data_size_in = sizeof(dummy), + .repeat = 1, + ); + __s64 sk; + + sk = connected_socket_v4(); + if (!ASSERT_NEQ(sk, -1, "connected_socket_v4")) + return; + + skel = test_sockmap_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto close_sk; + + prog = bpf_program__fd(skel->progs.copy_sock_map); + src = bpf_map__fd(skel->maps.src); + if (map_type == BPF_MAP_TYPE_SOCKMAP) + dst_map = skel->maps.dst_sock_map; + else + dst_map = skel->maps.dst_sock_hash; + + err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST); + if (!ASSERT_OK(err, "update_elem(src)")) + goto out; + + err = bpf_prog_test_run_opts(prog, &topts); + if (!ASSERT_OK(err, "test_run")) + goto out; + if (!ASSERT_NEQ(topts.retval, 0, "test_run retval")) + goto out; + + compare_cookies(skel->maps.src, dst_map); + +out: + test_sockmap_update__destroy(skel); +close_sk: + close(sk); +} + +static void test_sockmap_invalid_update(void) +{ + struct test_sockmap_invalid_update *skel; + + skel = 
test_sockmap_invalid_update__open_and_load(); + if (!ASSERT_NULL(skel, "open_and_load")) + test_sockmap_invalid_update__destroy(skel); +} + +static void test_sockmap_copy(enum bpf_map_type map_type) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + int err, len, src_fd, iter_fd; + union bpf_iter_link_info linfo = {}; + __u32 i, num_sockets, num_elems; + struct bpf_iter_sockmap *skel; + __s64 *sock_fd = NULL; + struct bpf_link *link; + struct bpf_map *src; + char buf[64]; + + skel = bpf_iter_sockmap__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load")) + return; + + if (map_type == BPF_MAP_TYPE_SOCKMAP) { + src = skel->maps.sockmap; + num_elems = bpf_map__max_entries(src); + num_sockets = num_elems - 1; + } else { + src = skel->maps.sockhash; + num_elems = bpf_map__max_entries(src) - 1; + num_sockets = num_elems; + } + + sock_fd = calloc(num_sockets, sizeof(*sock_fd)); + if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)")) + goto out; + + for (i = 0; i < num_sockets; i++) + sock_fd[i] = -1; + + src_fd = bpf_map__fd(src); + + for (i = 0; i < num_sockets; i++) { + sock_fd[i] = connected_socket_v4(); + if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4")) + goto out; + + err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST); + if (!ASSERT_OK(err, "map_update")) + goto out; + } + + linfo.map.map_fd = src_fd; + opts.link_info = &linfo; + opts.link_info_len = sizeof(linfo); + link = bpf_program__attach_iter(skel->progs.copy, &opts); + if (!ASSERT_OK_PTR(link, "attach_iter")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (!ASSERT_GE(len, 0, "read")) + goto close_iter; + + /* test results */ + if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems")) + goto close_iter; + + if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks")) + goto close_iter; + + compare_cookies(src, skel->maps.dst); + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + for (i = 0; sock_fd && i < num_sockets; i++) + if (sock_fd[i] >= 0) + close(sock_fd[i]); + if (sock_fd) + free(sock_fd); + bpf_iter_sockmap__destroy(skel); +} + +static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first, + enum bpf_attach_type second) +{ + struct test_sockmap_skb_verdict_attach *skel; + int err, map, verdict; + + skel = test_sockmap_skb_verdict_attach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + map = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(verdict, map, first, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + err = bpf_prog_attach(verdict, map, second, 0); + ASSERT_EQ(err, -EBUSY, "prog_attach_fail"); + + err = bpf_prog_detach2(verdict, map, first); + if (!ASSERT_OK(err, "bpf_prog_detach2")) + goto out; +out: + test_sockmap_skb_verdict_attach__destroy(skel); +} + +static __u32 query_prog_id(int prog_fd) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int err; + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") || + !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd")) + return 0; + + return info.id; +} + +static void test_sockmap_progs_query(enum bpf_attach_type attach_type) +{ + struct test_sockmap_progs_query *skel; + int err, map_fd, verdict_fd; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; 
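	/* [Editor's sketch, not part of the original patch] bpf_prog_query()
	 * treats prog_cnt as an in/out argument: on entry it is the capacity of
	 * the prog_ids array, on return the number of programs attached to the
	 * map for the given attach type. A standalone helper built on the same
	 * libbpf call (the helper name is illustrative) might look like:
	 *
	 *	static int count_attached_progs(int map_fd, enum bpf_attach_type type)
	 *	{
	 *		__u32 ids[16], cnt = sizeof(ids) / sizeof(ids[0]);
	 *		__u32 flags = 0;
	 *		int err;
	 *
	 *		err = bpf_prog_query(map_fd, type, 0, &flags, ids, &cnt);
	 *		return err ? err : (int)cnt;
	 *	}
	 */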
+ __u32 prog_cnt = 3; + + skel = test_sockmap_progs_query__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load")) + return; + + map_fd = bpf_map__fd(skel->maps.sock_map); + + if (attach_type == BPF_SK_MSG_VERDICT) + verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict); + else + verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict); + + err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + ASSERT_OK(err, "bpf_prog_query failed"); + ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query"); + ASSERT_EQ(prog_cnt, 0, "wrong program count on query"); + + err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0); + if (!ASSERT_OK(err, "bpf_prog_attach failed")) + goto out; + + prog_cnt = 1; + err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + ASSERT_OK(err, "bpf_prog_query failed"); + ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query"); + ASSERT_EQ(prog_cnt, 1, "wrong program count on query"); + ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd), + "wrong prog_ids on query"); + + bpf_prog_detach2(verdict_fd, map_fd, attach_type); +out: + test_sockmap_progs_query__destroy(skel); +} + +void test_sockmap_basic(void) +{ + if (test__start_subtest("sockmap create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash create_update_free")) + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap sk_msg load helpers")) + test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash sk_msg load helpers")) + test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap update")) + test_sockmap_update(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash update")) + test_sockmap_update(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap update in unsafe context")) + test_sockmap_invalid_update(); + if (test__start_subtest("sockmap copy")) + test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP); + if (test__start_subtest("sockhash copy")) + test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap skb_verdict attach")) { + test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT, + BPF_SK_SKB_STREAM_VERDICT); + test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT, + BPF_SK_SKB_VERDICT); + } + if (test__start_subtest("sockmap msg_verdict progs query")) + test_sockmap_progs_query(BPF_SK_MSG_VERDICT); + if (test__start_subtest("sockmap stream_parser progs query")) + test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER); + if (test__start_subtest("sockmap stream_verdict progs query")) + test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT); + if (test__start_subtest("sockmap skb_verdict progs query")) + test_sockmap_progs_query(BPF_SK_SKB_VERDICT); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c new file mode 100644 index 000000000..2d0796314 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Cloudflare +/* + * Tests for sockmap/sockhash holding kTLS sockets. 
+ */ + +#include <netinet/tcp.h> +#include "test_progs.h" + +#define MAX_TEST_NAME 80 +#define TCP_ULP 31 + +static int tcp_server(int family) +{ + int err, s; + + s = socket(family, SOCK_STREAM, 0); + if (!ASSERT_GE(s, 0, "socket")) + return -1; + + err = listen(s, SOMAXCONN); + if (!ASSERT_OK(err, "listen")) + return -1; + + return s; +} + +static int disconnect(int fd) +{ + struct sockaddr unspec = { AF_UNSPEC }; + + return connect(fd, &unspec, sizeof(unspec)); +} + +/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */ +static void test_sockmap_ktls_disconnect_after_delete(int family, int map) +{ + struct sockaddr_storage addr = {0}; + socklen_t len = sizeof(addr); + int err, cli, srv, zero = 0; + + srv = tcp_server(family); + if (srv == -1) + return; + + err = getsockname(srv, (struct sockaddr *)&addr, &len); + if (!ASSERT_OK(err, "getsockopt")) + goto close_srv; + + cli = socket(family, SOCK_STREAM, 0); + if (!ASSERT_GE(cli, 0, "socket")) + goto close_srv; + + err = connect(cli, (struct sockaddr *)&addr, len); + if (!ASSERT_OK(err, "connect")) + goto close_cli; + + err = bpf_map_update_elem(map, &zero, &cli, 0); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto close_cli; + + err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto close_cli; + + err = bpf_map_delete_elem(map, &zero); + if (!ASSERT_OK(err, "bpf_map_delete_elem")) + goto close_cli; + + err = disconnect(cli); + ASSERT_OK(err, "disconnect"); + +close_cli: + close(cli); +close_srv: + close(srv); +} + +static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map) +{ + struct sockaddr_storage addr = {}; + socklen_t len = sizeof(addr); + struct sockaddr_in6 *v6; + struct sockaddr_in *v4; + int err, s, zero = 0; + + switch (family) { + case AF_INET: + v4 = (struct sockaddr_in *)&addr; + v4->sin_family = AF_INET; + break; + case AF_INET6: + v6 = (struct sockaddr_in6 *)&addr; + v6->sin6_family = AF_INET6; + break; + default: + PRINT_FAIL("unsupported socket family %d", family); + return; + } + + s = socket(family, SOCK_STREAM, 0); + if (!ASSERT_GE(s, 0, "socket")) + return; + + err = bind(s, (struct sockaddr *)&addr, len); + if (!ASSERT_OK(err, "bind")) + goto close; + + err = getsockname(s, (struct sockaddr *)&addr, &len); + if (!ASSERT_OK(err, "getsockname")) + goto close; + + err = connect(s, (struct sockaddr *)&addr, len); + if (!ASSERT_OK(err, "connect")) + goto close; + + /* save sk->sk_prot and set it to tls_prots */ + err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto close; + + /* sockmap update should not affect saved sk_prot */ + err = bpf_map_update_elem(map, &zero, &s, BPF_ANY); + if (!ASSERT_ERR(err, "sockmap update elem")) + goto close; + + /* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */ + err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero)); + ASSERT_OK(err, "setsockopt(TCP_NODELAY)"); + +close: + close(s); +} + +static const char *fmt_test_name(const char *subtest_name, int family, + enum bpf_map_type map_type) +{ + const char *map_type_str = BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH"; + const char *family_str = AF_INET ? 
"IPv4" : "IPv6"; + static char test_name[MAX_TEST_NAME]; + + snprintf(test_name, MAX_TEST_NAME, + "sockmap_ktls %s %s %s", + subtest_name, family_str, map_type_str); + + return test_name; +} + +static void run_tests(int family, enum bpf_map_type map_type) +{ + int map; + + map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); + if (!ASSERT_GE(map, 0, "bpf_map_create")) + return; + + if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type))) + test_sockmap_ktls_disconnect_after_delete(family, map); + if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type))) + test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map); + + close(map); +} + +void test_sockmap_ktls(void) +{ + run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP); + run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH); + run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP); + run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c new file mode 100644 index 000000000..2cf0c7a3f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -0,0 +1,2026 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Cloudflare +/* + * Test suite for SOCKMAP/SOCKHASH holding listening sockets. + * Covers: + * 1. BPF map operations - bpf_map_{update,lookup delete}_elem + * 2. BPF redirect helpers - bpf_{sk,msg}_redirect_map + * 3. BPF reuseport helper - bpf_sk_select_reuseport + */ + +#include <linux/compiler.h> +#include <errno.h> +#include <error.h> +#include <limits.h> +#include <netinet/in.h> +#include <pthread.h> +#include <stdlib.h> +#include <string.h> +#include <sys/select.h> +#include <unistd.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "bpf_util.h" +#include "test_progs.h" +#include "test_sockmap_listen.skel.h" + +#define IO_TIMEOUT_SEC 30 +#define MAX_STRERR_LEN 256 +#define MAX_TEST_NAME 80 + +#define _FAIL(errnum, fmt...) \ + ({ \ + error_at_line(0, (errnum), __func__, __LINE__, fmt); \ + CHECK_FAIL(true); \ + }) +#define FAIL(fmt...) _FAIL(0, fmt) +#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt) +#define FAIL_LIBBPF(err, msg) \ + ({ \ + char __buf[MAX_STRERR_LEN]; \ + libbpf_strerror((err), __buf, sizeof(__buf)); \ + FAIL("%s: %s", (msg), __buf); \ + }) + +/* Wrappers that fail the test on error and report it. 
*/ + +#define xaccept_nonblock(fd, addr, len) \ + ({ \ + int __ret = \ + accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("accept"); \ + __ret; \ + }) + +#define xbind(fd, addr, len) \ + ({ \ + int __ret = bind((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("bind"); \ + __ret; \ + }) + +#define xclose(fd) \ + ({ \ + int __ret = close((fd)); \ + if (__ret == -1) \ + FAIL_ERRNO("close"); \ + __ret; \ + }) + +#define xconnect(fd, addr, len) \ + ({ \ + int __ret = connect((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("connect"); \ + __ret; \ + }) + +#define xgetsockname(fd, addr, len) \ + ({ \ + int __ret = getsockname((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockname"); \ + __ret; \ + }) + +#define xgetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = getsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockopt(" #name ")"); \ + __ret; \ + }) + +#define xlisten(fd, backlog) \ + ({ \ + int __ret = listen((fd), (backlog)); \ + if (__ret == -1) \ + FAIL_ERRNO("listen"); \ + __ret; \ + }) + +#define xsetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = setsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("setsockopt(" #name ")"); \ + __ret; \ + }) + +#define xsend(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = send((fd), (buf), (len), (flags)); \ + if (__ret == -1) \ + FAIL_ERRNO("send"); \ + __ret; \ + }) + +#define xrecv_nonblock(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \ + IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("recv"); \ + __ret; \ + }) + +#define xsocket(family, sotype, flags) \ + ({ \ + int __ret = socket(family, sotype, flags); \ + if (__ret == -1) \ + FAIL_ERRNO("socket"); \ + __ret; \ + }) + +#define xbpf_map_delete_elem(fd, key) \ + ({ \ + int __ret = bpf_map_delete_elem((fd), (key)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_delete"); \ + __ret; \ + }) + +#define xbpf_map_lookup_elem(fd, key, val) \ + ({ \ + int __ret = bpf_map_lookup_elem((fd), (key), (val)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_lookup"); \ + __ret; \ + }) + +#define xbpf_map_update_elem(fd, key, val, flags) \ + ({ \ + int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_update"); \ + __ret; \ + }) + +#define xbpf_prog_attach(prog, target, type, flags) \ + ({ \ + int __ret = \ + bpf_prog_attach((prog), (target), (type), (flags)); \ + if (__ret < 0) \ + FAIL_ERRNO("prog_attach(" #type ")"); \ + __ret; \ + }) + +#define xbpf_prog_detach2(prog, target, type) \ + ({ \ + int __ret = bpf_prog_detach2((prog), (target), (type)); \ + if (__ret < 0) \ + FAIL_ERRNO("prog_detach2(" #type ")"); \ + __ret; \ + }) + +#define xpthread_create(thread, attr, func, arg) \ + ({ \ + int __ret = pthread_create((thread), (attr), (func), (arg)); \ + errno = __ret; \ + if (__ret) \ + FAIL_ERRNO("pthread_create"); \ + __ret; \ + }) + +#define xpthread_join(thread, retval) \ + ({ \ + int __ret = pthread_join((thread), (retval)); \ + errno = __ret; \ + if (__ret) \ + FAIL_ERRNO("pthread_join"); \ + __ret; \ + }) + +static int poll_read(int fd, unsigned int timeout_sec) +{ + struct timeval timeout = { .tv_sec = timeout_sec }; + fd_set rfds; + int r; + + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + r = select(fd + 1, &rfds, NULL, NULL, &timeout); + if (r == 0) + errno = ETIME; + + return r == 1 ? 
0 : -1; +} + +static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return accept(fd, addr, len); +} + +static int recv_timeout(int fd, void *buf, size_t len, int flags, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return recv(fd, buf, len, flags); +} + +static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len) +{ + struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); + + addr4->sin_family = AF_INET; + addr4->sin_port = 0; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + *len = sizeof(*addr4); +} + +static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len) +{ + struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss)); + + addr6->sin6_family = AF_INET6; + addr6->sin6_port = 0; + addr6->sin6_addr = in6addr_loopback; + *len = sizeof(*addr6); +} + +static void init_addr_loopback(int family, struct sockaddr_storage *ss, + socklen_t *len) +{ + switch (family) { + case AF_INET: + init_addr_loopback4(ss, len); + return; + case AF_INET6: + init_addr_loopback6(ss, len); + return; + default: + FAIL("unsupported address family %d", family); + } +} + +static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) +{ + return (struct sockaddr *)ss; +} + +static int enable_reuseport(int s, int progfd) +{ + int err, one = 1; + + err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); + if (err) + return -1; + err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd, + sizeof(progfd)); + if (err) + return -1; + + return 0; +} + +static int socket_loopback_reuseport(int family, int sotype, int progfd) +{ + struct sockaddr_storage addr; + socklen_t len; + int err, s; + + init_addr_loopback(family, &addr, &len); + + s = xsocket(family, sotype, 0); + if (s == -1) + return -1; + + if (progfd >= 0) + enable_reuseport(s, progfd); + + err = xbind(s, sockaddr(&addr), len); + if (err) + goto close; + + if (sotype & SOCK_DGRAM) + return s; + + err = xlisten(s, SOMAXCONN); + if (err) + goto close; + + return s; +close: + xclose(s); + return -1; +} + +static int socket_loopback(int family, int sotype) +{ + return socket_loopback_reuseport(family, sotype, -1); +} + +static void test_insert_invalid(int family, int sotype, int mapfd) +{ + u32 key = 0; + u64 value; + int err; + + value = -1; + err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + if (!err || errno != EINVAL) + FAIL_ERRNO("map_update: expected EINVAL"); + + value = INT_MAX; + err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + if (!err || errno != EBADF) + FAIL_ERRNO("map_update: expected EBADF"); +} + +static void test_insert_opened(int family, int sotype, int mapfd) +{ + u32 key = 0; + u64 value; + int err, s; + + s = xsocket(family, sotype, 0); + if (s == -1) + return; + + errno = 0; + value = s; + err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + if (sotype == SOCK_STREAM) { + if (!err || errno != EOPNOTSUPP) + FAIL_ERRNO("map_update: expected EOPNOTSUPP"); + } else if (err) + FAIL_ERRNO("map_update: expected success"); + xclose(s); +} + +static void test_insert_bound(int family, int sotype, int mapfd) +{ + struct sockaddr_storage addr; + socklen_t len; + u32 key = 0; + u64 value; + int err, s; + + init_addr_loopback(family, &addr, &len); + + s = xsocket(family, sotype, 0); + if (s == -1) + return; + + err = xbind(s, sockaddr(&addr), len); + if (err) + goto close; + + errno = 0; + value = s; + err = 
bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + if (!err || errno != EOPNOTSUPP) + FAIL_ERRNO("map_update: expected EOPNOTSUPP"); +close: + xclose(s); +} + +static void test_insert(int family, int sotype, int mapfd) +{ + u64 value; + u32 key; + int s; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + xclose(s); +} + +static void test_delete_after_insert(int family, int sotype, int mapfd) +{ + u64 value; + u32 key; + int s; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + xbpf_map_delete_elem(mapfd, &key); + xclose(s); +} + +static void test_delete_after_close(int family, int sotype, int mapfd) +{ + int err, s; + u64 value; + u32 key; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + + xclose(s); + + errno = 0; + err = bpf_map_delete_elem(mapfd, &key); + if (!err || (errno != EINVAL && errno != ENOENT)) + /* SOCKMAP and SOCKHASH return different error codes */ + FAIL_ERRNO("map_delete: expected EINVAL/EINVAL"); +} + +static void test_lookup_after_insert(int family, int sotype, int mapfd) +{ + u64 cookie, value; + socklen_t len; + u32 key; + int s; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + + len = sizeof(cookie); + xgetsockopt(s, SOL_SOCKET, SO_COOKIE, &cookie, &len); + + xbpf_map_lookup_elem(mapfd, &key, &value); + + if (value != cookie) { + FAIL("map_lookup: have %#llx, want %#llx", + (unsigned long long)value, (unsigned long long)cookie); + } + + xclose(s); +} + +static void test_lookup_after_delete(int family, int sotype, int mapfd) +{ + int err, s; + u64 value; + u32 key; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + xbpf_map_delete_elem(mapfd, &key); + + errno = 0; + err = bpf_map_lookup_elem(mapfd, &key, &value); + if (!err || errno != ENOENT) + FAIL_ERRNO("map_lookup: expected ENOENT"); + + xclose(s); +} + +static void test_lookup_32_bit_value(int family, int sotype, int mapfd) +{ + u32 key, value32; + int err, s; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + mapfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(key), + sizeof(value32), 1, NULL); + if (mapfd < 0) { + FAIL_ERRNO("map_create"); + goto close; + } + + key = 0; + value32 = s; + xbpf_map_update_elem(mapfd, &key, &value32, BPF_NOEXIST); + + errno = 0; + err = bpf_map_lookup_elem(mapfd, &key, &value32); + if (!err || errno != ENOSPC) + FAIL_ERRNO("map_lookup: expected ENOSPC"); + + xclose(mapfd); +close: + xclose(s); +} + +static void test_update_existing(int family, int sotype, int mapfd) +{ + int s1, s2; + u64 value; + u32 key; + + s1 = socket_loopback(family, sotype); + if (s1 < 0) + return; + + s2 = socket_loopback(family, sotype); + if (s2 < 0) + goto close_s1; + + key = 0; + value = s1; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + + value = s2; + xbpf_map_update_elem(mapfd, &key, &value, BPF_EXIST); + xclose(s2); +close_s1: + xclose(s1); +} + +/* Exercise the code path where we destroy child sockets that never + * got accept()'ed, aka orphans, when parent socket gets closed. 
+ */ +static void test_destroy_orphan_child(int family, int sotype, int mapfd) +{ + struct sockaddr_storage addr; + socklen_t len; + int err, s, c; + u64 value; + u32 key; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + + c = xsocket(family, sotype, 0); + if (c == -1) + goto close_srv; + + xconnect(c, sockaddr(&addr), len); + xclose(c); +close_srv: + xclose(s); +} + +/* Perform a passive open after removing listening socket from SOCKMAP + * to ensure that callbacks get restored properly. + */ +static void test_clone_after_delete(int family, int sotype, int mapfd) +{ + struct sockaddr_storage addr; + socklen_t len; + int err, s, c; + u64 value; + u32 key; + + s = socket_loopback(family, sotype); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + key = 0; + value = s; + xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST); + xbpf_map_delete_elem(mapfd, &key); + + c = xsocket(family, sotype, 0); + if (c < 0) + goto close_srv; + + xconnect(c, sockaddr(&addr), len); + xclose(c); +close_srv: + xclose(s); +} + +/* Check that child socket that got created while parent was in a + * SOCKMAP, but got accept()'ed only after the parent has been removed + * from SOCKMAP, gets cloned without parent psock state or callbacks. + */ +static void test_accept_after_delete(int family, int sotype, int mapfd) +{ + struct sockaddr_storage addr; + const u32 zero = 0; + int err, s, c, p; + socklen_t len; + u64 value; + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s == -1) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + value = s; + err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST); + if (err) + goto close_srv; + + c = xsocket(family, sotype, 0); + if (c == -1) + goto close_srv; + + /* Create child while parent is in sockmap */ + err = xconnect(c, sockaddr(&addr), len); + if (err) + goto close_cli; + + /* Remove parent from sockmap */ + err = xbpf_map_delete_elem(mapfd, &zero); + if (err) + goto close_cli; + + p = xaccept_nonblock(s, NULL, NULL); + if (p == -1) + goto close_cli; + + /* Check that child sk_user_data is not set */ + value = p; + xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST); + + xclose(p); +close_cli: + xclose(c); +close_srv: + xclose(s); +} + +/* Check that child socket that got created and accepted while parent + * was in a SOCKMAP is cloned without parent psock state or callbacks. 
+ */ +static void test_accept_before_delete(int family, int sotype, int mapfd) +{ + struct sockaddr_storage addr; + const u32 zero = 0, one = 1; + int err, s, c, p; + socklen_t len; + u64 value; + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s == -1) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + value = s; + err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST); + if (err) + goto close_srv; + + c = xsocket(family, sotype, 0); + if (c == -1) + goto close_srv; + + /* Create & accept child while parent is in sockmap */ + err = xconnect(c, sockaddr(&addr), len); + if (err) + goto close_cli; + + p = xaccept_nonblock(s, NULL, NULL); + if (p == -1) + goto close_cli; + + /* Check that child sk_user_data is not set */ + value = p; + xbpf_map_update_elem(mapfd, &one, &value, BPF_NOEXIST); + + xclose(p); +close_cli: + xclose(c); +close_srv: + xclose(s); +} + +struct connect_accept_ctx { + int sockfd; + unsigned int done; + unsigned int nr_iter; +}; + +static bool is_thread_done(struct connect_accept_ctx *ctx) +{ + return READ_ONCE(ctx->done); +} + +static void *connect_accept_thread(void *arg) +{ + struct connect_accept_ctx *ctx = arg; + struct sockaddr_storage addr; + int family, socktype; + socklen_t len; + int err, i, s; + + s = ctx->sockfd; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto done; + + len = sizeof(family); + err = xgetsockopt(s, SOL_SOCKET, SO_DOMAIN, &family, &len); + if (err) + goto done; + + len = sizeof(socktype); + err = xgetsockopt(s, SOL_SOCKET, SO_TYPE, &socktype, &len); + if (err) + goto done; + + for (i = 0; i < ctx->nr_iter; i++) { + int c, p; + + c = xsocket(family, socktype, 0); + if (c < 0) + break; + + err = xconnect(c, (struct sockaddr *)&addr, sizeof(addr)); + if (err) { + xclose(c); + break; + } + + p = xaccept_nonblock(s, NULL, NULL); + if (p < 0) { + xclose(c); + break; + } + + xclose(p); + xclose(c); + } +done: + WRITE_ONCE(ctx->done, 1); + return NULL; +} + +static void test_syn_recv_insert_delete(int family, int sotype, int mapfd) +{ + struct connect_accept_ctx ctx = { 0 }; + struct sockaddr_storage addr; + socklen_t len; + u32 zero = 0; + pthread_t t; + int err, s; + u64 value; + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close; + + ctx.sockfd = s; + ctx.nr_iter = 1000; + + err = xpthread_create(&t, NULL, connect_accept_thread, &ctx); + if (err) + goto close; + + value = s; + while (!is_thread_done(&ctx)) { + err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST); + if (err) + break; + + err = xbpf_map_delete_elem(mapfd, &zero); + if (err) + break; + } + + xpthread_join(t, NULL); +close: + xclose(s); +} + +static void *listen_thread(void *arg) +{ + struct sockaddr unspec = { AF_UNSPEC }; + struct connect_accept_ctx *ctx = arg; + int err, i, s; + + s = ctx->sockfd; + + for (i = 0; i < ctx->nr_iter; i++) { + err = xlisten(s, 1); + if (err) + break; + err = xconnect(s, &unspec, sizeof(unspec)); + if (err) + break; + } + + WRITE_ONCE(ctx->done, 1); + return NULL; +} + +static void test_race_insert_listen(int family, int socktype, int mapfd) +{ + struct connect_accept_ctx ctx = { 0 }; + const u32 zero = 0; + const int one = 1; + pthread_t t; + int err, s; + u64 value; + + s = xsocket(family, socktype, 0); + if (s < 0) + return; + + err = xsetsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)); + if 
(err) + goto close; + + ctx.sockfd = s; + ctx.nr_iter = 10000; + + err = pthread_create(&t, NULL, listen_thread, &ctx); + if (err) + goto close; + + value = s; + while (!is_thread_done(&ctx)) { + err = bpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST); + /* Expecting EOPNOTSUPP before listen() */ + if (err && errno != EOPNOTSUPP) { + FAIL_ERRNO("map_update"); + break; + } + + err = bpf_map_delete_elem(mapfd, &zero); + /* Expecting no entry after unhash on connect(AF_UNSPEC) */ + if (err && errno != EINVAL && errno != ENOENT) { + FAIL_ERRNO("map_delete"); + break; + } + } + + xpthread_join(t, NULL); +close: + xclose(s); +} + +static void zero_verdict_count(int mapfd) +{ + unsigned int zero = 0; + int key; + + key = SK_DROP; + xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY); + key = SK_PASS; + xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY); +} + +enum redir_mode { + REDIR_INGRESS, + REDIR_EGRESS, +}; + +static const char *redir_mode_str(enum redir_mode mode) +{ + switch (mode) { + case REDIR_INGRESS: + return "ingress"; + case REDIR_EGRESS: + return "egress"; + default: + return "unknown"; + } +} + +static int add_to_sockmap(int sock_mapfd, int fd1, int fd2) +{ + u64 value; + u32 key; + int err; + + key = 0; + value = fd1; + err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); + if (err) + return err; + + key = 1; + value = fd2; + return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +} + +static void redir_to_connected(int family, int sotype, int sock_mapfd, + int verd_mapfd, enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + struct sockaddr_storage addr; + int s, c0, c1, p0, p1; + unsigned int pass; + socklen_t len; + int err, n; + u32 key; + char b; + + zero_verdict_count(verd_mapfd); + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + c0 = xsocket(family, sotype, 0); + if (c0 < 0) + goto close_srv; + err = xconnect(c0, sockaddr(&addr), len); + if (err) + goto close_cli0; + + p0 = xaccept_nonblock(s, NULL, NULL); + if (p0 < 0) + goto close_cli0; + + c1 = xsocket(family, sotype, 0); + if (c1 < 0) + goto close_peer0; + err = xconnect(c1, sockaddr(&addr), len); + if (err) + goto close_cli1; + + p1 = xaccept_nonblock(s, NULL, NULL); + if (p1 < 0) + goto close_cli1; + + err = add_to_sockmap(sock_mapfd, p0, p1); + if (err) + goto close_peer1; + + n = write(mode == REDIR_INGRESS ? 
c1 : p1, "a", 1); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close_peer1; + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto close_peer1; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", log_prefix, pass); + n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC); + if (n < 0) + FAIL_ERRNO("%s: recv_timeout", log_prefix); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + +close_peer1: + xclose(p1); +close_cli1: + xclose(c1); +close_peer0: + xclose(p0); +close_cli0: + xclose(c0); +close_srv: + xclose(s); +} + +static void test_skb_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_stream_verdict); + int parser = bpf_program__fd(skel->progs.prog_stream_parser); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return; + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + goto detach; + + redir_to_connected(family, sotype, sock_map, verdict_map, + REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT); +detach: + xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER); +} + +static void test_msg_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_msg_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0); + if (err) + return; + + redir_to_connected(family, sotype, sock_map, verdict_map, REDIR_EGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT); +} + +static void redir_to_listening(int family, int sotype, int sock_mapfd, + int verd_mapfd, enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + struct sockaddr_storage addr; + int s, c, p, err, n; + unsigned int drop; + socklen_t len; + u32 key; + + zero_verdict_count(verd_mapfd); + + s = socket_loopback(family, sotype | SOCK_NONBLOCK); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + c = xsocket(family, sotype, 0); + if (c < 0) + goto close_srv; + err = xconnect(c, sockaddr(&addr), len); + if (err) + goto close_cli; + + p = xaccept_nonblock(s, NULL, NULL); + if (p < 0) + goto close_cli; + + err = add_to_sockmap(sock_mapfd, s, p); + if (err) + goto close_peer; + + n = write(mode == REDIR_INGRESS ? 
c : p, "a", 1); + if (n < 0 && errno != EACCES) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close_peer; + + key = SK_DROP; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &drop); + if (err) + goto close_peer; + if (drop != 1) + FAIL("%s: want drop count 1, have %d", log_prefix, drop); + +close_peer: + xclose(p); +close_cli: + xclose(c); +close_srv: + xclose(s); +} + +static void test_skb_redir_to_listening(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_stream_verdict); + int parser = bpf_program__fd(skel->progs.prog_stream_parser); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return; + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + goto detach; + + redir_to_listening(family, sotype, sock_map, verdict_map, + REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT); +detach: + xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER); +} + +static void test_msg_redir_to_listening(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_msg_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0); + if (err) + return; + + redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT); +} + +static void test_reuseport_select_listening(int family, int sotype, + int sock_map, int verd_map, + int reuseport_prog) +{ + struct sockaddr_storage addr; + unsigned int pass; + int s, c, err; + socklen_t len; + u64 value; + u32 key; + + zero_verdict_count(verd_map); + + s = socket_loopback_reuseport(family, sotype | SOCK_NONBLOCK, + reuseport_prog); + if (s < 0) + return; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + key = 0; + value = s; + err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST); + if (err) + goto close_srv; + + c = xsocket(family, sotype, 0); + if (c < 0) + goto close_srv; + err = xconnect(c, sockaddr(&addr), len); + if (err) + goto close_cli; + + if (sotype == SOCK_STREAM) { + int p; + + p = xaccept_nonblock(s, NULL, NULL); + if (p < 0) + goto close_cli; + xclose(p); + } else { + char b = 'a'; + ssize_t n; + + n = xsend(c, &b, sizeof(b), 0); + if (n == -1) + goto close_cli; + + n = xrecv_nonblock(s, &b, sizeof(b), 0); + if (n == -1) + goto close_cli; + } + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_map, &key, &pass); + if (err) + goto close_cli; + if (pass != 1) + FAIL("want pass count 1, have %d", pass); + +close_cli: + xclose(c); +close_srv: + xclose(s); +} + +static void test_reuseport_select_connected(int family, int sotype, + int sock_map, int verd_map, + int reuseport_prog) +{ + struct sockaddr_storage addr; + int s, c0, c1, p0, err; + unsigned int drop; + socklen_t len; + u64 value; + u32 key; + + zero_verdict_count(verd_map); + + s = socket_loopback_reuseport(family, sotype, reuseport_prog); + if (s < 0) + return; + + /* Populate sock_map[0] to avoid ENOENT on first connection */ + key = 0; + value = s; + err = xbpf_map_update_elem(sock_map, 
&key, &value, BPF_NOEXIST); + if (err) + goto close_srv; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + c0 = xsocket(family, sotype, 0); + if (c0 < 0) + goto close_srv; + + err = xconnect(c0, sockaddr(&addr), len); + if (err) + goto close_cli0; + + if (sotype == SOCK_STREAM) { + p0 = xaccept_nonblock(s, NULL, NULL); + if (p0 < 0) + goto close_cli0; + } else { + p0 = xsocket(family, sotype, 0); + if (p0 < 0) + goto close_cli0; + + len = sizeof(addr); + err = xgetsockname(c0, sockaddr(&addr), &len); + if (err) + goto close_cli0; + + err = xconnect(p0, sockaddr(&addr), len); + if (err) + goto close_cli0; + } + + /* Update sock_map[0] to redirect to a connected socket */ + key = 0; + value = p0; + err = xbpf_map_update_elem(sock_map, &key, &value, BPF_EXIST); + if (err) + goto close_peer0; + + c1 = xsocket(family, sotype, 0); + if (c1 < 0) + goto close_peer0; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + goto close_srv; + + errno = 0; + err = connect(c1, sockaddr(&addr), len); + if (sotype == SOCK_DGRAM) { + char b = 'a'; + ssize_t n; + + n = xsend(c1, &b, sizeof(b), 0); + if (n == -1) + goto close_cli1; + + n = recv_timeout(c1, &b, sizeof(b), 0, IO_TIMEOUT_SEC); + err = n == -1; + } + if (!err || errno != ECONNREFUSED) + FAIL_ERRNO("connect: expected ECONNREFUSED"); + + key = SK_DROP; + err = xbpf_map_lookup_elem(verd_map, &key, &drop); + if (err) + goto close_cli1; + if (drop != 1) + FAIL("want drop count 1, have %d", drop); + +close_cli1: + xclose(c1); +close_peer0: + xclose(p0); +close_cli0: + xclose(c0); +close_srv: + xclose(s); +} + +/* Check that redirecting across reuseport groups is not allowed. */ +static void test_reuseport_mixed_groups(int family, int sotype, int sock_map, + int verd_map, int reuseport_prog) +{ + struct sockaddr_storage addr; + int s1, s2, c, err; + unsigned int drop; + socklen_t len; + u32 key; + + zero_verdict_count(verd_map); + + /* Create two listeners, each in its own reuseport group */ + s1 = socket_loopback_reuseport(family, sotype, reuseport_prog); + if (s1 < 0) + return; + + s2 = socket_loopback_reuseport(family, sotype, reuseport_prog); + if (s2 < 0) + goto close_srv1; + + err = add_to_sockmap(sock_map, s1, s2); + if (err) + goto close_srv2; + + /* Connect to s2, reuseport BPF selects s1 via sock_map[0] */ + len = sizeof(addr); + err = xgetsockname(s2, sockaddr(&addr), &len); + if (err) + goto close_srv2; + + c = xsocket(family, sotype, 0); + if (c < 0) + goto close_srv2; + + err = connect(c, sockaddr(&addr), len); + if (sotype == SOCK_DGRAM) { + char b = 'a'; + ssize_t n; + + n = xsend(c, &b, sizeof(b), 0); + if (n == -1) + goto close_cli; + + n = recv_timeout(c, &b, sizeof(b), 0, IO_TIMEOUT_SEC); + err = n == -1; + } + if (!err || errno != ECONNREFUSED) { + FAIL_ERRNO("connect: expected ECONNREFUSED"); + goto close_cli; + } + + /* Expect drop, can't redirect outside of reuseport group */ + key = SK_DROP; + err = xbpf_map_lookup_elem(verd_map, &key, &drop); + if (err) + goto close_cli; + if (drop != 1) + FAIL("want drop count 1, have %d", drop); + +close_cli: + xclose(c); +close_srv2: + xclose(s2); +close_srv1: + xclose(s1); +} + +#define TEST(fn, ...) 
\ + { \ + fn, #fn, __VA_ARGS__ \ + } + +static void test_ops_cleanup(const struct bpf_map *map) +{ + int err, mapfd; + u32 key; + + mapfd = bpf_map__fd(map); + + for (key = 0; key < bpf_map__max_entries(map); key++) { + err = bpf_map_delete_elem(mapfd, &key); + if (err && errno != EINVAL && errno != ENOENT) + FAIL_ERRNO("map_delete: expected EINVAL/ENOENT"); + } +} + +static const char *family_str(sa_family_t family) +{ + switch (family) { + case AF_INET: + return "IPv4"; + case AF_INET6: + return "IPv6"; + case AF_UNIX: + return "Unix"; + default: + return "unknown"; + } +} + +static const char *map_type_str(const struct bpf_map *map) +{ + int type; + + if (!map) + return "invalid"; + type = bpf_map__type(map); + + switch (type) { + case BPF_MAP_TYPE_SOCKMAP: + return "sockmap"; + case BPF_MAP_TYPE_SOCKHASH: + return "sockhash"; + default: + return "unknown"; + } +} + +static const char *sotype_str(int sotype) +{ + switch (sotype) { + case SOCK_DGRAM: + return "UDP"; + case SOCK_STREAM: + return "TCP"; + default: + return "unknown"; + } +} + +static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map, + int family, int sotype) +{ + const struct op_test { + void (*fn)(int family, int sotype, int mapfd); + const char *name; + int sotype; + } tests[] = { + /* insert */ + TEST(test_insert_invalid), + TEST(test_insert_opened), + TEST(test_insert_bound, SOCK_STREAM), + TEST(test_insert), + /* delete */ + TEST(test_delete_after_insert), + TEST(test_delete_after_close), + /* lookup */ + TEST(test_lookup_after_insert), + TEST(test_lookup_after_delete), + TEST(test_lookup_32_bit_value), + /* update */ + TEST(test_update_existing), + /* races with insert/delete */ + TEST(test_destroy_orphan_child, SOCK_STREAM), + TEST(test_syn_recv_insert_delete, SOCK_STREAM), + TEST(test_race_insert_listen, SOCK_STREAM), + /* child clone */ + TEST(test_clone_after_delete, SOCK_STREAM), + TEST(test_accept_after_delete, SOCK_STREAM), + TEST(test_accept_before_delete, SOCK_STREAM), + }; + const char *family_name, *map_name, *sotype_name; + const struct op_test *t; + char s[MAX_TEST_NAME]; + int map_fd; + + family_name = family_str(family); + map_name = map_type_str(map); + sotype_name = sotype_str(sotype); + map_fd = bpf_map__fd(map); + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name, + sotype_name, t->name); + + if (t->sotype != 0 && t->sotype != sotype) + continue; + + if (!test__start_subtest(s)) + continue; + + t->fn(family, sotype, map_fd); + test_ops_cleanup(map); + } +} + +static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map, + int family, int sotype) +{ + const struct redir_test { + void (*fn)(struct test_sockmap_listen *skel, + struct bpf_map *map, int family, int sotype); + const char *name; + } tests[] = { + TEST(test_skb_redir_to_connected), + TEST(test_skb_redir_to_listening), + TEST(test_msg_redir_to_connected), + TEST(test_msg_redir_to_listening), + }; + const char *family_name, *map_name; + const struct redir_test *t; + char s[MAX_TEST_NAME]; + + family_name = family_str(family); + map_name = map_type_str(map); + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, + t->name); + + if (!test__start_subtest(s)) + continue; + + t->fn(skel, map, family, sotype); + } +} + +static void unix_redir_to_connected(int sotype, int sock_mapfd, + int verd_mapfd, enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + int c0, 
c1, p0, p1; + unsigned int pass; + int err, n; + int sfd[2]; + u32 key; + char b; + + zero_verdict_count(verd_mapfd); + + if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) + return; + c0 = sfd[0], p0 = sfd[1]; + + if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) + goto close0; + c1 = sfd[0], p1 = sfd[1]; + + err = add_to_sockmap(sock_mapfd, p0, p1); + if (err) + goto close; + + n = write(c1, "a", 1); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close; + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto close; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", log_prefix, pass); + + n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); + if (n < 0) + FAIL_ERRNO("%s: recv_timeout", log_prefix); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + +close: + xclose(c1); + xclose(p1); +close0: + xclose(c0); + xclose(p0); +} + +static void unix_skb_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); + if (err) + return; + + skel->bss->test_ingress = false; + unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_EGRESS); + skel->bss->test_ingress = true; + unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, + int sotype) +{ + const char *family_name, *map_name; + char s[MAX_TEST_NAME]; + + family_name = family_str(AF_UNIX); + map_name = map_type_str(map); + snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); + if (!test__start_subtest(s)) + return; + unix_skb_redir_to_connected(skel, map, sotype); +} + +static void test_reuseport(struct test_sockmap_listen *skel, + struct bpf_map *map, int family, int sotype) +{ + const struct reuseport_test { + void (*fn)(int family, int sotype, int socket_map, + int verdict_map, int reuseport_prog); + const char *name; + int sotype; + } tests[] = { + TEST(test_reuseport_select_listening), + TEST(test_reuseport_select_connected), + TEST(test_reuseport_mixed_groups), + }; + int socket_map, verdict_map, reuseport_prog; + const char *family_name, *map_name, *sotype_name; + const struct reuseport_test *t; + char s[MAX_TEST_NAME]; + + family_name = family_str(family); + map_name = map_type_str(map); + sotype_name = sotype_str(sotype); + + socket_map = bpf_map__fd(map); + verdict_map = bpf_map__fd(skel->maps.verdict_map); + reuseport_prog = bpf_program__fd(skel->progs.prog_reuseport); + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name, + sotype_name, t->name); + + if (t->sotype != 0 && t->sotype != sotype) + continue; + + if (!test__start_subtest(s)) + continue; + + t->fn(family, sotype, socket_map, verdict_map, reuseport_prog); + } +} + +static int inet_socketpair(int family, int type, int *s, int *c) +{ + struct sockaddr_storage addr; + socklen_t len; + int p0, c0; + int err; + + p0 = socket_loopback(family, type | SOCK_NONBLOCK); + if (p0 < 0) + return p0; + + len = sizeof(addr); + err = xgetsockname(p0, sockaddr(&addr), &len); + if (err) + goto 
close_peer0; + + c0 = xsocket(family, type | SOCK_NONBLOCK, 0); + if (c0 < 0) { + err = c0; + goto close_peer0; + } + err = xconnect(c0, sockaddr(&addr), len); + if (err) + goto close_cli0; + err = xgetsockname(c0, sockaddr(&addr), &len); + if (err) + goto close_cli0; + err = xconnect(p0, sockaddr(&addr), len); + if (err) + goto close_cli0; + + *s = p0; + *c = c0; + return 0; + +close_cli0: + xclose(c0); +close_peer0: + xclose(p0); + return err; +} + +static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd, + enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + int c0, c1, p0, p1; + unsigned int pass; + int err, n; + u32 key; + char b; + + zero_verdict_count(verd_mapfd); + + err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); + if (err) + return; + err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1); + if (err) + goto close_cli0; + + err = add_to_sockmap(sock_mapfd, p0, p1); + if (err) + goto close_cli1; + + n = write(c1, "a", 1); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close_cli1; + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto close_cli1; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", log_prefix, pass); + + n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); + if (n < 0) + FAIL_ERRNO("%s: recv_timeout", log_prefix); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + +close_cli1: + xclose(c1); + xclose(p1); +close_cli0: + xclose(c0); + xclose(p0); +} + +static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family) +{ + int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); + if (err) + return; + + skel->bss->test_ingress = false; + udp_redir_to_connected(family, sock_map, verdict_map, REDIR_EGRESS); + skel->bss->test_ingress = true; + udp_redir_to_connected(family, sock_map, verdict_map, REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map, + int family) +{ + const char *family_name, *map_name; + char s[MAX_TEST_NAME]; + + family_name = family_str(family); + map_name = map_type_str(map); + snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); + if (!test__start_subtest(s)) + return; + udp_skb_redir_to_connected(skel, map, family); +} + +static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd, + int verd_mapfd, enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + int c0, c1, p0, p1; + unsigned int pass; + int err, n; + int sfd[2]; + u32 key; + char b; + + zero_verdict_count(verd_mapfd); + + if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd)) + return; + c0 = sfd[0], p0 = sfd[1]; + + err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1); + if (err) + goto close; + + err = add_to_sockmap(sock_mapfd, p0, p1); + if (err) + goto close_cli1; + + n = write(c1, "a", 1); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close_cli1; + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto close_cli1; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", 
log_prefix, pass); + + n = recv_timeout(mode == REDIR_INGRESS ? p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); + if (n < 0) + FAIL_ERRNO("%s: recv_timeout", log_prefix); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + +close_cli1: + xclose(c1); + xclose(p1); +close: + xclose(c0); + xclose(p0); +} + +static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family) +{ + int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); + if (err) + return; + + skel->bss->test_ingress = false; + inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, + REDIR_EGRESS); + inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, + REDIR_EGRESS); + skel->bss->test_ingress = true; + inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, + REDIR_INGRESS); + inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, + REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, + int verd_mapfd, enum redir_mode mode) +{ + const char *log_prefix = redir_mode_str(mode); + int c0, c1, p0, p1; + unsigned int pass; + int err, n; + int sfd[2]; + u32 key; + char b; + + zero_verdict_count(verd_mapfd); + + err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); + if (err) + return; + + if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd)) + goto close_cli0; + c1 = sfd[0], p1 = sfd[1]; + + err = add_to_sockmap(sock_mapfd, p0, p1); + if (err) + goto close; + + n = write(c1, "a", 1); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto close; + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto close; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", log_prefix, pass); + + n = recv_timeout(mode == REDIR_INGRESS ? 
p0 : c0, &b, 1, 0, IO_TIMEOUT_SEC); + if (n < 0) + FAIL_ERRNO("%s: recv_timeout", log_prefix); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + +close: + xclose(c1); + xclose(p1); +close_cli0: + xclose(c0); + xclose(p0); + +} + +static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, int family) +{ + int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); + if (err) + return; + + skel->bss->test_ingress = false; + unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, + REDIR_EGRESS); + unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, + REDIR_EGRESS); + skel->bss->test_ingress = true; + unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, + REDIR_INGRESS); + unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, + REDIR_INGRESS); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, + int family) +{ + const char *family_name, *map_name; + char s[MAX_TEST_NAME]; + + family_name = family_str(family); + map_name = map_type_str(map); + snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); + if (!test__start_subtest(s)) + return; + inet_unix_skb_redir_to_connected(skel, map, family); + unix_inet_skb_redir_to_connected(skel, map, family); +} + +static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, + int family) +{ + test_ops(skel, map, family, SOCK_STREAM); + test_ops(skel, map, family, SOCK_DGRAM); + test_redir(skel, map, family, SOCK_STREAM); + test_reuseport(skel, map, family, SOCK_STREAM); + test_reuseport(skel, map, family, SOCK_DGRAM); + test_udp_redir(skel, map, family); + test_udp_unix_redir(skel, map, family); +} + +void serial_test_sockmap_listen(void) +{ + struct test_sockmap_listen *skel; + + skel = test_sockmap_listen__open_and_load(); + if (!skel) { + FAIL("skeleton open/load failed"); + return; + } + + skel->bss->test_sockmap = true; + run_tests(skel, skel->maps.sock_map, AF_INET); + run_tests(skel, skel->maps.sock_map, AF_INET6); + test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); + test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM); + + skel->bss->test_sockmap = false; + run_tests(skel, skel->maps.sock_hash, AF_INET); + run_tests(skel, skel->maps.sock_hash, AF_INET6); + test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); + test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM); + + test_sockmap_listen__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c new file mode 100644 index 000000000..aa4debf62 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c @@ -0,0 +1,984 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +static char bpf_log_buf[4096]; +static bool verbose; + +enum sockopt_test_error { + OK = 0, + DENY_LOAD, + DENY_ATTACH, + EPERM_GETSOCKOPT, + EFAULT_GETSOCKOPT, + EPERM_SETSOCKOPT, + EFAULT_SETSOCKOPT, +}; + +static struct sockopt_test { + const char *descr; + const struct bpf_insn insns[64]; + enum bpf_attach_type attach_type; + enum bpf_attach_type expected_attach_type; + + int set_optname; + int set_level; + const char set_optval[64]; + socklen_t 
set_optlen; + + int get_optname; + int get_level; + const char get_optval[64]; + socklen_t get_optlen; + socklen_t get_optlen_ret; + + enum sockopt_test_error error; +} tests[] = { + + /* ==================== getsockopt ==================== */ + + { + .descr = "getsockopt: no expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = 0, + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: wrong expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + .error = DENY_ATTACH, + }, + { + .descr = "getsockopt: bypass bpf hook", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "getsockopt: return EPERM from bpf hook", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + + .get_optlen = 1, + .error = EPERM_GETSOCKOPT, + }, + { + .descr = "getsockopt: no optval bounds check, deny loading", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + + /* ctx->optval[0] = 0x80 */ + BPF_MOV64_IMM(BPF_REG_0, 0x80), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->level", + .insns = { + /* r6 = ctx->level */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, level)), + + /* if (ctx->level == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = 123, + + .get_optlen = 1, + }, + { + .descr = "getsockopt: deny writing to ctx->level", + .insns = { + /* ctx->level = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, level)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->optname", + .insns = { + /* r6 = ctx->optname */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optname)), + + /* if (ctx->optname == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + 
BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optname = 123, + + .get_optlen = 1, + }, + { + .descr = "getsockopt: read ctx->retval", + .insns = { + /* r6 = ctx->retval */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + .get_optlen = 1, + }, + { + .descr = "getsockopt: deny writing to ctx->optname", + .insns = { + /* ctx->optname = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optname)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: read ctx->optlen", + .insns = { + /* r6 = ctx->optlen */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optlen)), + + /* if (ctx->optlen == 64) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + }, + { + .descr = "getsockopt: deny bigger ctx->optlen", + .insns = { + /* ctx->optlen = 65 */ + BPF_MOV64_IMM(BPF_REG_0, 65), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + + .error = EFAULT_GETSOCKOPT, + }, + { + .descr = "getsockopt: deny arbitrary ctx->retval", + .insns = { + /* ctx->retval = 123 */ + BPF_MOV64_IMM(BPF_REG_0, 123), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + + .error = EFAULT_GETSOCKOPT, + }, + { + .descr = "getsockopt: support smaller ctx->optlen", + .insns = { + /* ctx->optlen = 32 */ + BPF_MOV64_IMM(BPF_REG_0, 32), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_optlen = 64, + .get_optlen_ret = 32, + }, + { + .descr = "getsockopt: deny writing to ctx->optval", + .insns = { + /* ctx->optval = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: deny writing to 
ctx->optval_end", + .insns = { + /* ctx->optval_end = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval_end)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "getsockopt: rewrite value", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xF0 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0), + /* } */ + + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1*/ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_GETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + + .get_optval = { 0xF0 }, + .get_optlen = 1, + }, + + /* ==================== setsockopt ==================== */ + + { + .descr = "setsockopt: no expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = 0, + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: wrong expected_attach_type", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + .error = DENY_ATTACH, + }, + { + .descr = "setsockopt: bypass bpf hook", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: return EPERM from bpf hook", + .insns = { + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = SOL_IP, + .set_optname = IP_TOS, + + .set_optlen = 1, + .error = EPERM_SETSOCKOPT, + }, + { + .descr = "setsockopt: no optval bounds check, deny loading", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + + /* r0 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: read ctx->level", + .insns = { + /* r6 = ctx->level */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, level)), + + /* if (ctx->level == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + 
BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = 123, + + .set_optlen = 1, + }, + { + .descr = "setsockopt: allow changing ctx->level", + .insns = { + /* ctx->level = SOL_IP */ + BPF_MOV64_IMM(BPF_REG_0, SOL_IP), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, level)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = 234, /* should be rewritten to SOL_IP */ + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: read ctx->optname", + .insns = { + /* r6 = ctx->optname */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optname)), + + /* if (ctx->optname == 123) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 123, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optname = 123, + + .set_optlen = 1, + }, + { + .descr = "setsockopt: allow changing ctx->optname", + .insns = { + /* ctx->optname = IP_TOS */ + BPF_MOV64_IMM(BPF_REG_0, IP_TOS), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optname)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = 456, /* should be rewritten to IP_TOS */ + + .set_optval = { 1 << 3 }, + .set_optlen = 1, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: read ctx->optlen", + .insns = { + /* r6 = ctx->optlen */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optlen)), + + /* if (ctx->optlen == 64) { */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 64, 4), + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } else { */ + /* return 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + }, + { + .descr = "setsockopt: ctx->optlen == -1 is ok", + .insns = { + /* ctx->optlen = -1 */ + BPF_MOV64_IMM(BPF_REG_0, -1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + }, + { + .descr = "setsockopt: deny ctx->optlen < 0 (except -1)", + .insns = { + /* ctx->optlen = -2 */ + BPF_MOV64_IMM(BPF_REG_0, -2), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = 
BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 4, + + .error = EFAULT_SETSOCKOPT, + }, + { + .descr = "setsockopt: deny ctx->optlen > input optlen", + .insns = { + /* ctx->optlen = 65 */ + BPF_MOV64_IMM(BPF_REG_0, 65), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_optlen = 64, + + .error = EFAULT_SETSOCKOPT, + }, + { + .descr = "setsockopt: allow changing ctx->optlen within bounds", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 1 << 3 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 1 << 3), + /* } */ + + /* ctx->optlen = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optlen)), + + /* return 1*/ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 1, 1, 1, 1 }, + .set_optlen = 4, + .get_optval = { 1 << 3 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: deny write ctx->retval", + .insns = { + /* ctx->retval = 0 */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny read ctx->retval", + .insns = { + /* r6 = ctx->retval */ + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, retval)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny writing to ctx->optval", + .insns = { + /* ctx->optval = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: deny writing to ctx->optval_end", + .insns = { + /* ctx->optval_end = 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct bpf_sockopt, optval_end)), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .error = DENY_LOAD, + }, + { + .descr = "setsockopt: allow IP_TOS <= 128", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r7 = ctx->optval + 1 */ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1), + + /* r8 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1, + offsetof(struct 
bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4), + + /* r9 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0), + + /* if (ctx->optval[0] < 128) */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } */ + + /* } else { */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 0x80 }, + .set_optlen = 1, + .get_optval = { 0x80 }, + .get_optlen = 1, + }, + { + .descr = "setsockopt: deny IP_TOS > 128", + .insns = { + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r7 = ctx->optval + 1 */ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1), + + /* r8 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 4), + + /* r9 = ctx->optval[0] */ + BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_6, 0), + + /* if (ctx->optval[0] < 128) */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_9, 128, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + /* } */ + + /* } else { */ + BPF_MOV64_IMM(BPF_REG_0, 0), + /* } */ + + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .get_level = SOL_IP, + .set_level = SOL_IP, + + .get_optname = IP_TOS, + .set_optname = IP_TOS, + + .set_optval = { 0x81 }, + .set_optlen = 1, + .get_optval = { 0x00 }, + .get_optlen = 1, + + .error = EPERM_SETSOCKOPT, + }, +}; + +static int load_prog(const struct bpf_insn *insns, + enum bpf_attach_type expected_attach_type) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .expected_attach_type = expected_attach_type, + .log_level = 2, + .log_buf = bpf_log_buf, + .log_size = sizeof(bpf_log_buf), + ); + int fd, insns_cnt = 0; + + for (; + insns[insns_cnt].code != (BPF_JMP | BPF_EXIT); + insns_cnt++) { + } + insns_cnt++; + + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCKOPT, NULL, "GPL", insns, insns_cnt, &opts); + if (verbose && fd < 0) + fprintf(stderr, "%s\n", bpf_log_buf); + + return fd; +} + +static int run_test(int cgroup_fd, struct sockopt_test *test) +{ + int sock_fd, err, prog_fd; + void *optval = NULL; + int ret = 0; + + prog_fd = load_prog(test->insns, test->expected_attach_type); + if (prog_fd < 0) { + if (test->error == DENY_LOAD) + return 0; + + log_err("Failed to load BPF program"); + return -1; + } + + err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0); + if (err < 0) { + if (test->error == DENY_ATTACH) + goto close_prog_fd; + + log_err("Failed to attach BPF program"); + ret = -1; + goto close_prog_fd; + } + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (sock_fd < 0) { + log_err("Failed to create AF_INET socket"); + ret = -1; + goto detach_prog; + } + + if (test->set_optlen) { + err = setsockopt(sock_fd, test->set_level, test->set_optname, + test->set_optval, test->set_optlen); + if (err) { + if (errno == EPERM && test->error == EPERM_SETSOCKOPT) + goto close_sock_fd; + if (errno == EFAULT && test->error == EFAULT_SETSOCKOPT) + goto free_optval; + + log_err("Failed to call setsockopt"); + ret = -1; + goto close_sock_fd; + } + } + + if (test->get_optlen) { + optval = malloc(test->get_optlen); 
+ socklen_t optlen = test->get_optlen; + socklen_t expected_get_optlen = test->get_optlen_ret ?: + test->get_optlen; + + err = getsockopt(sock_fd, test->get_level, test->get_optname, + optval, &optlen); + if (err) { + if (errno == EPERM && test->error == EPERM_GETSOCKOPT) + goto free_optval; + if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT) + goto free_optval; + + log_err("Failed to call getsockopt"); + ret = -1; + goto free_optval; + } + + if (optlen != expected_get_optlen) { + errno = 0; + log_err("getsockopt returned unexpected optlen"); + ret = -1; + goto free_optval; + } + + if (memcmp(optval, test->get_optval, optlen) != 0) { + errno = 0; + log_err("getsockopt returned unexpected optval"); + ret = -1; + goto free_optval; + } + } + + ret = test->error != OK; + +free_optval: + free(optval); +close_sock_fd: + close(sock_fd); +detach_prog: + bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type); +close_prog_fd: + close(prog_fd); + return ret; +} + +void test_sockopt(void) +{ + int cgroup_fd, i; + + cgroup_fd = test__join_cgroup("/sockopt"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + test__start_subtest(tests[i].descr); + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); + } + + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c new file mode 100644 index 000000000..60c17a8e2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +#define SOL_CUSTOM 0xdeadbeef +#define CUSTOM_INHERIT1 0 +#define CUSTOM_INHERIT2 1 +#define CUSTOM_LISTENER 2 + +static int connect_to_server(int server_fd) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + goto out; + } + + if (connect(fd, (const struct sockaddr *)&addr, len) < 0) { + log_err("Fail to connect to server"); + goto out; + } + + return fd; + +out: + close(fd); + return -1; +} + +static int verify_sockopt(int fd, int optname, const char *msg, char expected) +{ + socklen_t optlen = 1; + char buf = 0; + int err; + + err = getsockopt(fd, SOL_CUSTOM, optname, &buf, &optlen); + if (err) { + log_err("%s: failed to call getsockopt", msg); + return 1; + } + + printf("%s %d: got=0x%x ? 
expected=0x%x\n", msg, optname, buf, expected); + + if (buf != expected) { + log_err("%s: unexpected getsockopt value %d != %d", msg, + buf, expected); + return 1; + } + + return 0; +} + +static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER; + +static void *server_thread(void *arg) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int fd = *(int *)arg; + int client_fd; + int err = 0; + + err = listen(fd, 1); + + pthread_mutex_lock(&server_started_mtx); + pthread_cond_signal(&server_started); + pthread_mutex_unlock(&server_started_mtx); + + if (!ASSERT_GE(err, 0, "listed on socket")) + return NULL; + + err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1); + err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1); + err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1); + + client_fd = accept(fd, (struct sockaddr *)&addr, &len); + if (!ASSERT_GE(client_fd, 0, "accept client")) + return NULL; + + err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1); + err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1); + err += verify_sockopt(client_fd, CUSTOM_LISTENER, "accept", 0); + + close(client_fd); + + return (void *)(long)err; +} + +static int start_server(void) +{ + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), + }; + char buf; + int err; + int fd; + int i; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create server socket"); + return -1; + } + + for (i = CUSTOM_INHERIT1; i <= CUSTOM_LISTENER; i++) { + buf = 0x01; + err = setsockopt(fd, SOL_CUSTOM, i, &buf, 1); + if (err) { + log_err("Failed to call setsockopt(%d)", i); + close(fd); + return -1; + } + } + + if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) { + log_err("Failed to bind socket"); + close(fd); + return -1; + } + + return fd; +} + +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, + const char *prog_name) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) { + log_err("Failed to deduct types for %s BPF program", prog_name); + return -1; + } + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (!prog) { + log_err("Failed to find %s BPF program", prog_name); + return -1; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, + attach_type, 0); + if (err) { + log_err("Failed to attach %s BPF program", prog_name); + return -1; + } + + return 0; +} + +static void run_test(int cgroup_fd) +{ + int server_fd = -1, client_fd; + struct bpf_object *obj; + void *server_err; + pthread_t tid; + int err; + + obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_open")) + return; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto close_bpf_object; + + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt"); + if (!ASSERT_OK(err, "prog_attach _getsockopt")) + goto close_bpf_object; + + err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt"); + if (!ASSERT_OK(err, "prog_attach _setsockopt")) + goto close_bpf_object; + + server_fd = start_server(); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto close_bpf_object; + + pthread_mutex_lock(&server_started_mtx); + if (!ASSERT_OK(pthread_create(&tid, NULL, server_thread, + (void *)&server_fd), 
"pthread_create")) { + pthread_mutex_unlock(&server_started_mtx); + goto close_server_fd; + } + pthread_cond_wait(&server_started, &server_started_mtx); + pthread_mutex_unlock(&server_started_mtx); + + client_fd = connect_to_server(server_fd); + if (!ASSERT_GE(client_fd, 0, "connect_to_server")) + goto close_server_fd; + + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0), "verify_sockopt1"); + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0), "verify_sockopt2"); + ASSERT_OK(verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0), "verify_sockopt ener"); + + pthread_join(tid, &server_err); + + err = (int)(long)server_err; + ASSERT_OK(err, "pthread_join retval"); + + close(client_fd); + +close_server_fd: + close(server_fd); +close_bpf_object: + bpf_object__close(obj); +} + +void test_sockopt_inherit(void) +{ + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/sockopt_inherit"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + return; + + run_test(cgroup_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c new file mode 100644 index 000000000..7f5659349 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) { + log_err("Failed to deduct types for %s BPF program", title); + return -1; + } + + prog = bpf_object__find_program_by_name(obj, name); + if (!prog) { + log_err("Failed to find %s BPF program", name); + return -1; + } + + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, + attach_type, BPF_F_ALLOW_MULTI); + if (err) { + log_err("Failed to attach %s BPF program", name); + return -1; + } + + return 0; +} + +static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) +{ + enum bpf_attach_type attach_type; + enum bpf_prog_type prog_type; + struct bpf_program *prog; + int err; + + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); + if (err) + return -1; + + prog = bpf_object__find_program_by_name(obj, name); + if (!prog) + return -1; + + err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd, + attach_type); + if (err) + return -1; + + return 0; +} + +static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, + int cg_child, int sock_fd) +{ + socklen_t optlen; + __u8 buf; + int err; + + /* Set IP_TOS to the expected value (0x80). 
*/ + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80) { + log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf); + err = -1; + goto detach; + } + + /* Attach child program and make sure it returns new value: + * - kernel: -> 0x80 + * - child: 0x80 -> 0x90 + */ + + err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); + if (err) + goto detach; + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x90) { + log_err("Unexpected getsockopt 0x%x != 0x90", buf); + err = -1; + goto detach; + } + + /* Attach parent program and make sure it returns new value: + * - kernel: -> 0x80 + * - child: 0x80 -> 0x90 + * - parent: 0x90 -> 0xA0 + */ + + err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); + if (err) + goto detach; + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0xA0) { + log_err("Unexpected getsockopt 0x%x != 0xA0", buf); + err = -1; + goto detach; + } + + /* Setting unexpected initial sockopt should return EPERM: + * - kernel: -> 0x40 + * - child: unexpected 0x40, EPERM + * - parent: unexpected 0x40, EPERM + */ + + buf = 0x40; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!err) { + log_err("Unexpected success from getsockopt(IP_TOS)"); + goto detach; + } + + /* Detach child program and make sure we still get EPERM: + * - kernel: -> 0x40 + * - parent: unexpected 0x40, EPERM + */ + + err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); + if (err) { + log_err("Failed to detach child program"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (!err) { + log_err("Unexpected success from getsockopt(IP_TOS)"); + goto detach; + } + + /* Set initial value to the one the parent program expects: + * - kernel: -> 0x90 + * - parent: 0x90 -> 0xA0 + */ + + buf = 0x90; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0xA0) { + log_err("Unexpected getsockopt 0x%x != 0xA0", buf); + err = -1; + goto detach; + } + +detach: + prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); + prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); + + return err; +} + +static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, + int cg_child, int sock_fd) +{ + socklen_t optlen; + __u8 buf; + int err; + + /* Set IP_TOS to the expected value (0x80). 
*/ + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80) { + log_err("Unexpected getsockopt 0x%x != 0x80 without BPF", buf); + err = -1; + goto detach; + } + + /* Attach child program and make sure it adds 0x10. */ + + err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); + if (err) + goto detach; + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80 + 0x10) { + log_err("Unexpected getsockopt 0x%x != 0x80 + 0x10", buf); + err = -1; + goto detach; + } + + /* Attach parent program and make sure it adds another 0x10. */ + + err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); + if (err) + goto detach; + + buf = 0x80; + err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1); + if (err < 0) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto detach; + } + + buf = 0x00; + optlen = 1; + err = getsockopt(sock_fd, SOL_IP, IP_TOS, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto detach; + } + + if (buf != 0x80 + 2 * 0x10) { + log_err("Unexpected getsockopt 0x%x != 0x80 + 2 * 0x10", buf); + err = -1; + goto detach; + } + +detach: + prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); + prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); + + return err; +} + +void test_sockopt_multi(void) +{ + int cg_parent = -1, cg_child = -1; + struct bpf_object *obj = NULL; + int sock_fd = -1; + int err = -1; + + cg_parent = test__join_cgroup("/parent"); + if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent")) + goto out; + + cg_child = test__join_cgroup("/parent/child"); + if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child")) + goto out; + + obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_load")) + goto out; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto out; + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "socket")) + goto out; + + ASSERT_OK(run_getsockopt_test(obj, cg_parent, cg_child, sock_fd), "getsockopt_test"); + ASSERT_OK(run_setsockopt_test(obj, cg_parent, cg_child, sock_fd), "setsockopt_test"); + +out: + close(sock_fd); + bpf_object__close(obj); + close(cg_child); + close(cg_parent); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c new file mode 100644 index 000000000..6b53b3cb8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include <netinet/tcp.h> +#include "sockopt_qos_to_cc.skel.h" + +static void run_setsockopt_test(int cg_fd, int sock_fd) +{ + socklen_t optlen; + char cc[16]; /* TCP_CA_NAME_MAX */ + int buf; + int err = -1; + + buf = 0x2D; + err = setsockopt(sock_fd, SOL_IPV6, IPV6_TCLASS, &buf, sizeof(buf)); + if (!ASSERT_OK(err, "setsockopt(sock_fd, IPV6_TCLASS)")) + return; + + /* Verify the setsockopt cc 
change */ + optlen = sizeof(cc); + err = getsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, cc, &optlen); + if (!ASSERT_OK(err, "getsockopt(sock_fd, TCP_CONGESTION)")) + return; + + if (!ASSERT_STREQ(cc, "reno", "getsockopt(sock_fd, TCP_CONGESTION)")) + return; +} + +void test_sockopt_qos_to_cc(void) +{ + struct sockopt_qos_to_cc *skel; + char cc_cubic[16] = "cubic"; /* TCP_CA_NAME_MAX */ + int cg_fd = -1; + int sock_fd = -1; + int err; + + cg_fd = test__join_cgroup("/sockopt_qos_to_cc"); + if (!ASSERT_GE(cg_fd, 0, "cg-join(sockopt_qos_to_cc)")) + return; + + skel = sockopt_qos_to_cc__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto done; + + sock_fd = socket(AF_INET6, SOCK_STREAM, 0); + if (!ASSERT_GE(sock_fd, 0, "v6 socket open")) + goto done; + + err = setsockopt(sock_fd, SOL_TCP, TCP_CONGESTION, &cc_cubic, + sizeof(cc_cubic)); + if (!ASSERT_OK(err, "setsockopt(sock_fd, TCP_CONGESTION)")) + goto done; + + skel->links.sockopt_qos_to_cc = + bpf_program__attach_cgroup(skel->progs.sockopt_qos_to_cc, + cg_fd); + if (!ASSERT_OK_PTR(skel->links.sockopt_qos_to_cc, + "prog_attach(sockopt_qos_to_cc)")) + goto done; + + run_setsockopt_test(cg_fd, sock_fd); + +done: + if (sock_fd != -1) + close(sock_fd); + if (cg_fd != -1) + close(cg_fd); + /* destroy can take null and error pointer */ + sockopt_qos_to_cc__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c new file mode 100644 index 000000000..05d0e07da --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" + +#include <linux/tcp.h> +#include <linux/netlink.h> +#include "sockopt_sk.skel.h" + +#ifndef SOL_TCP +#define SOL_TCP IPPROTO_TCP +#endif + +#define SOL_CUSTOM 0xdeadbeef + +static int getsetsockopt(void) +{ + int fd, err; + union { + char u8[4]; + __u32 u32; + char cc[16]; /* TCP_CA_NAME_MAX */ + struct tcp_zerocopy_receive zc; + } buf = {}; + socklen_t optlen; + char *big_buf = NULL; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (fd < 0) { + log_err("Failed to create socket"); + return -1; + } + + /* IP_TOS - BPF bypass */ + + optlen = getpagesize() * 2; + big_buf = calloc(1, optlen); + if (!big_buf) { + log_err("Couldn't allocate two pages"); + goto err; + } + + *(int *)big_buf = 0x08; + err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen); + if (err) { + log_err("Failed to call setsockopt(IP_TOS)"); + goto err; + } + + memset(big_buf, 0, optlen); + optlen = 1; + err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(IP_TOS)"); + goto err; + } + + if (*big_buf != 0x08) { + log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08", + (int)*big_buf); + goto err; + } + + /* IP_TTL - EPERM */ + + buf.u8[0] = 1; + err = setsockopt(fd, SOL_IP, IP_TTL, &buf, 1); + if (!err || errno != EPERM) { + log_err("Unexpected success from setsockopt(IP_TTL)"); + goto err; + } + + /* SOL_CUSTOM - handled by BPF */ + + buf.u8[0] = 0x01; + err = setsockopt(fd, SOL_CUSTOM, 0, &buf, 1); + if (err) { + log_err("Failed to call setsockopt"); + goto err; + } + + buf.u32 = 0x00; + optlen = 4; + err = getsockopt(fd, SOL_CUSTOM, 0, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt"); + goto err; + } + + if (optlen != 1) { + log_err("Unexpected optlen %d != 1", optlen); + goto err; + } + if (buf.u8[0] != 0x01) { + log_err("Unexpected buf[0] 0x%02x != 0x01", buf.u8[0]); + goto err; + } + + 
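/*
 * Editor's note: the cgroup BPF programs that actually service SOL_CUSTOM
 * live in progs/sockopt_sk.c and are not part of this hunk.  Purely to
 * illustrate the mechanism the userspace checks above rely on, here is a
 * minimal sketch of how such a pair of handlers could stash the one-byte
 * option in socket storage.  All names below (custom_storage, sketch_*)
 * are invented for this sketch and do not claim to match the real programs.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_CUSTOM 0xdeadbeef

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u8);
} custom_storage SEC(".maps");

SEC("cgroup/setsockopt")
int sketch_setsockopt(struct bpf_sockopt *ctx)
{
	__u8 *optval_end = ctx->optval_end;
	__u8 *optval = ctx->optval;
	__u8 *stored;

	if (ctx->level != SOL_CUSTOM)
		return 1;		/* not ours, let the kernel handle it */
	if (optval + 1 > optval_end)
		return 0;		/* bounds check the verifier insists on */

	stored = bpf_sk_storage_get(&custom_storage, ctx->sk, 0,
				    BPF_SK_STORAGE_GET_F_CREATE);
	if (!stored)
		return 0;		/* reject with EPERM */

	*stored = *optval;		/* remember the byte for getsockopt */
	ctx->optlen = -1;		/* consumed: never reaches the kernel */
	return 1;
}

SEC("cgroup/getsockopt")
int sketch_getsockopt(struct bpf_sockopt *ctx)
{
	__u8 *optval_end = ctx->optval_end;
	__u8 *optval = ctx->optval;
	__u8 *stored;

	if (ctx->level != SOL_CUSTOM)
		return 1;
	if (optval + 1 > optval_end)
		return 0;

	stored = bpf_sk_storage_get(&custom_storage, ctx->sk, 0, 0);
	if (!stored)
		return 0;		/* nothing was set on this socket: reject */

	*optval = *stored;		/* hand the stored byte back to userspace */
	ctx->optlen = 1;		/* which is why the test expects optlen == 1 */
	ctx->retval = 0;		/* override the kernel's error for the unknown level */
	return 1;
}

char _license[] SEC("license") = "GPL";
/* End of editor's sketch; the diff continues with the IP_FREEBIND checks. */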
/* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */ + + optlen = getpagesize() * 2; + memset(big_buf, 0, optlen); + + err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen); + if (err != 0) { + log_err("Failed to call setsockopt, ret=%d", err); + goto err; + } + + err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen); + if (err != 0) { + log_err("Failed to call getsockopt, ret=%d", err); + goto err; + } + + if (optlen != 1 || *(__u8 *)big_buf != 0x55) { + log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x", + optlen, *(__u8 *)big_buf); + } + + /* SO_SNDBUF is overwritten */ + + buf.u32 = 0x01010101; + err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, 4); + if (err) { + log_err("Failed to call setsockopt(SO_SNDBUF)"); + goto err; + } + + buf.u32 = 0x00; + optlen = 4; + err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(SO_SNDBUF)"); + goto err; + } + + if (buf.u32 != 0x55AA*2) { + log_err("Unexpected getsockopt(SO_SNDBUF) 0x%x != 0x55AA*2", + buf.u32); + goto err; + } + + /* TCP_CONGESTION can extend the string */ + + strcpy(buf.cc, "nv"); + err = setsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, strlen("nv")); + if (err) { + log_err("Failed to call setsockopt(TCP_CONGESTION)"); + goto err; + } + + + optlen = sizeof(buf.cc); + err = getsockopt(fd, SOL_TCP, TCP_CONGESTION, &buf, &optlen); + if (err) { + log_err("Failed to call getsockopt(TCP_CONGESTION)"); + goto err; + } + + if (strcmp(buf.cc, "cubic") != 0) { + log_err("Unexpected getsockopt(TCP_CONGESTION) %s != %s", + buf.cc, "cubic"); + goto err; + } + + /* TCP_ZEROCOPY_RECEIVE triggers */ + memset(&buf, 0, sizeof(buf)); + optlen = sizeof(buf.zc); + err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen); + if (err) { + log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d", + err, errno); + goto err; + } + + memset(&buf, 0, sizeof(buf)); + buf.zc.address = 12345; /* Not page aligned. 
Rejected by tcp_zerocopy_receive() */ + optlen = sizeof(buf.zc); + errno = 0; + err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen); + if (errno != EINVAL) { + log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d", + err, errno); + goto err; + } + + /* optval=NULL case is handled correctly */ + + close(fd); + fd = socket(AF_NETLINK, SOCK_RAW, 0); + if (fd < 0) { + log_err("Failed to create AF_NETLINK socket"); + return -1; + } + + buf.u32 = 1; + optlen = sizeof(__u32); + err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d", + err, errno); + goto err; + } + + optlen = 0; + err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d", + err, errno); + goto err; + } + ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); + + free(big_buf); + close(fd); + return 0; +err: + free(big_buf); + close(fd); + return -1; +} + +static void run_test(int cgroup_fd) +{ + struct sockopt_sk *skel; + + skel = sockopt_sk__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + skel->bss->page_size = getpagesize(); + + skel->links._setsockopt = + bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link")) + goto cleanup; + + skel->links._getsockopt = + bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link")) + goto cleanup; + + ASSERT_OK(getsetsockopt(), "getsetsockopt"); + +cleanup: + sockopt_sk__destroy(skel); +} + +void test_sockopt_sk(void) +{ + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/sockopt_sk"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /sockopt_sk")) + return; + + run_test(cgroup_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c new file mode 100644 index 000000000..15eb1372d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void *spin_lock_thread(void *arg) +{ + int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); + pthread_exit(arg); +} + +void test_spinlock(void) +{ + const char *file = "./test_spin_lock.bpf.o"; + pthread_t thread_id[4]; + struct bpf_object *obj = NULL; + int prog_fd; + int err = 0, i; + void *ret; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); + if (CHECK_FAIL(err)) { + printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno); + goto close_prog; + } + for (i = 0; i < 4; i++) + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, + &spin_lock_thread, &prog_fd))) + goto close_prog; + + for (i = 0; i < 4; i++) + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || + ret != (void *)&prog_fd)) + goto close_prog; +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stack_var_off.c b/tools/testing/selftests/bpf/prog_tests/stack_var_off.c new file mode 100644 index 000000000..2ce9deefa --- /dev/null +++ 
b/tools/testing/selftests/bpf/prog_tests/stack_var_off.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_stack_var_off.skel.h" + +/* Test read and writes to the stack performed with offsets that are not + * statically known. + */ +void test_stack_var_off(void) +{ + int duration = 0; + struct test_stack_var_off *skel; + + skel = test_stack_var_off__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + /* Give pid to bpf prog so it doesn't trigger for anyone else. */ + skel->bss->test_pid = getpid(); + /* Initialize the probe's input. */ + skel->bss->input[0] = 2; + skel->bss->input[1] = 42; /* This will be returned in probe_res. */ + + if (!ASSERT_OK(test_stack_var_off__attach(skel), "skel_attach")) + goto cleanup; + + /* Trigger probe. */ + usleep(1); + + if (CHECK(skel->bss->probe_res != 42, "check_probe_res", + "wrong probe res: %d\n", skel->bss->probe_res)) + goto cleanup; + +cleanup: + test_stack_var_off__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c new file mode 100644 index 000000000..9ad09a6c5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_stacktrace_build_id.skel.h" + +void test_stacktrace_build_id(void) +{ + + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + struct test_stacktrace_build_id *skel; + int err, stack_trace_len; + __u32 key, prev_key, val, duration = 0; + char buf[256]; + int i, j; + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; + int build_id_matches = 0; + int retry = 1; + +retry: + skel = test_stacktrace_build_id__open_and_load(); + if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n")) + return; + + err = test_stacktrace_build_id__attach(skel); + if (CHECK(err, "attach_tp", "err %d\n", err)) + goto cleanup; + + /* find map fds */ + control_map_fd = bpf_map__fd(skel->maps.control_map); + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + stackmap_fd = bpf_map__fd(skel->maps.stackmap); + stack_amap_fd = bpf_map__fd(skel->maps.stack_amap); + + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) + goto cleanup; + if (CHECK_FAIL(system("./urandom_read"))) + goto cleanup; + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = extract_build_id(buf, 256); + + if (CHECK(err, "get build_id with readelf", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key)); + if (CHECK(err, "get_next_key from stackmap", + "err %d, errno %d\n", err, errno)) + goto cleanup; + + do { + char build_id[64]; + + err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); + if (CHECK(err, "lookup_elem from stackmap", + "err %d, errno %d\n", err, errno)) + goto cleanup; + for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) + if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && + id_offs[i].offset != 0) { + for (j = 0; j < 20; ++j) + sprintf(build_id + 2 * j, "%02x", + id_offs[i].build_id[j] & 0xff); + if (strstr(buf, build_id) != NULL) + build_id_matches = 1; + } + prev_key = key; + } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0); + + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + test_stacktrace_build_id__destroy(skel); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + + if (CHECK(build_id_matches < 1, "build id match", + "Didn't find expected build ID from the map\n")) + goto cleanup; + + stack_trace_len = PERF_MAX_STACK_DEPTH * + sizeof(struct bpf_stack_build_id); + err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); + CHECK(err, "compare_stack_ips stackmap vs. stack_amap", + "err %d errno %d\n", err, errno); + +cleanup: + test_stacktrace_build_id__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c new file mode 100644 index 000000000..704f7f6c3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "test_stacktrace_build_id.skel.h" + +void test_stacktrace_build_id_nmi(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd; + struct test_stacktrace_build_id *skel; + int err, pmu_fd; + struct perf_event_attr attr = { + .freq = 1, + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + }; + __u32 key, prev_key, val, duration = 0; + char buf[256]; + int i, j; + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; + int build_id_matches = 0; + int retry = 1; + + attr.sample_freq = read_perf_max_sample_freq(); + +retry: + skel = test_stacktrace_build_id__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + /* override program type */ + bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT); + + err = test_stacktrace_build_id__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (pmu_fd < 0 && errno == ENOENT) { + printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__); + test__skip(); + goto cleanup; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) { + close(pmu_fd); + goto cleanup; + } + 
+ /* find map fds */ + control_map_fd = bpf_map__fd(skel->maps.control_map); + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + stackmap_fd = bpf_map__fd(skel->maps.stackmap); + + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) + goto cleanup; + if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000"))) + goto cleanup; + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = extract_build_id(buf, 256); + + if (CHECK(err, "get build_id with readelf", + "err %d errno %d\n", err, errno)) + goto cleanup; + + err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key)); + if (CHECK(err, "get_next_key from stackmap", + "err %d, errno %d\n", err, errno)) + goto cleanup; + + do { + char build_id[64]; + + err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key), + id_offs, sizeof(id_offs), 0); + if (CHECK(err, "lookup_elem from stackmap", + "err %d, errno %d\n", err, errno)) + goto cleanup; + for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) + if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && + id_offs[i].offset != 0) { + for (j = 0; j < 20; ++j) + sprintf(build_id + 2 * j, "%02x", + id_offs[i].build_id[j] & 0xff); + if (strstr(buf, build_id) != NULL) + build_id_matches = 1; + } + prev_key = key; + } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0); + + /* stack_map_get_build_id_offset() is racy and sometimes can return + * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID; + * try it one more time. + */ + if (build_id_matches < 1 && retry--) { + test_stacktrace_build_id__destroy(skel); + printf("%s:WARN:Didn't find expected build ID from the map, retrying\n", + __func__); + goto retry; + } + + if (CHECK(build_id_matches < 1, "build id match", + "Didn't find expected build ID from the map\n")) + goto cleanup; + + /* + * We intentionally skip compare_stack_ips(). 
This is because we + * only support one in_nmi() ips-to-build_id translation per cpu + * at any time, thus stack_amap here will always fallback to + * BPF_STACK_BUILD_ID_IP; + */ + +cleanup: + test_stacktrace_build_id__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c new file mode 100644 index 000000000..df59e4ae2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_map(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; + const char *prog_name = "oncpu"; + int err, prog_fd, stack_trace_len; + const char *file = "./test_stacktrace_map.bpf.o"; + __u32 key, val, duration = 0; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + return; + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) + goto close_prog; + + link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch"); + if (!ASSERT_OK_PTR(link, "attach_tp")) + goto close_prog; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (CHECK_FAIL(control_map_fd < 0)) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (CHECK_FAIL(stackid_hmap_fd < 0)) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (CHECK_FAIL(stackmap_fd < 0)) + goto disable_pmu; + + stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); + if (CHECK_FAIL(stack_amap_fd < 0)) + goto disable_pmu; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); + err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); + if (CHECK(err, "compare_stack_ips stackmap vs. 
stack_amap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + +disable_pmu: + bpf_link__destroy(link); +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c new file mode 100644 index 000000000..c6ef06f55 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_stacktrace_map_raw_tp(void) +{ + const char *prog_name = "oncpu"; + int control_map_fd, stackid_hmap_fd, stackmap_fd; + const char *file = "./test_stacktrace_map.bpf.o"; + __u32 key, val, duration = 0; + int err, prog_fd; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link = NULL; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name)) + goto close_prog; + + link = bpf_program__attach_raw_tracepoint(prog, "sched_switch"); + if (!ASSERT_OK_PTR(link, "attach_raw_tp")) + goto close_prog; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (CHECK_FAIL(control_map_fd < 0)) + goto close_prog; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (CHECK_FAIL(stackid_hmap_fd < 0)) + goto close_prog; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (CHECK_FAIL(stackmap_fd < 0)) + goto close_prog; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + +close_prog: + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c new file mode 100644 index 000000000..1932b1e06 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "stacktrace_map_skip.skel.h" + +#define TEST_STACK_DEPTH 2 + +void test_stacktrace_map_skip(void) +{ + struct stacktrace_map_skip *skel; + int stackid_hmap_fd, stackmap_fd, stack_amap_fd; + int err, stack_trace_len; + + skel = stacktrace_map_skip__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + /* find map fds */ + stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap); + if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd")) + goto out; + + stackmap_fd = bpf_map__fd(skel->maps.stackmap); + if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd")) + goto out; + + stack_amap_fd = bpf_map__fd(skel->maps.stack_amap); + if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd")) + goto out; + + skel->bss->pid = getpid(); + + err = stacktrace_map_skip__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + skel->bss->control = 1; + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap")) + goto out; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap")) + goto out; + + stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64); + err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); + if (!ASSERT_OK(err, "compare_stack_ips stackmap vs. 
stack_amap")) + goto out; + + if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed")) + goto out; + +out: + stacktrace_map_skip__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/static_linked.c b/tools/testing/selftests/bpf/prog_tests/static_linked.c new file mode 100644 index 000000000..5c4e3014e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/static_linked.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include <test_progs.h> +#include "test_static_linked.skel.h" + +void test_static_linked(void) +{ + int err; + struct test_static_linked* skel; + + skel = test_static_linked__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->rodata->rovar1 = 1; + skel->rodata->rovar2 = 4; + + err = test_static_linked__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + err = test_static_linked__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger */ + usleep(1); + + ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1"); + ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2"); + +cleanup: + test_static_linked__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c new file mode 100644 index 000000000..903f35a9e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include "test_subprogs.skel.h" +#include "test_subprogs_unused.skel.h" + +struct toggler_ctx { + int fd; + bool stop; +}; + +static void *toggle_jit_harden(void *arg) +{ + struct toggler_ctx *ctx = arg; + char two = '2'; + char zero = '0'; + + while (!ctx->stop) { + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &two, sizeof(two)); + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &zero, sizeof(zero)); + } + + return NULL; +} + +static void test_subprogs_with_jit_harden_toggling(void) +{ + struct toggler_ctx ctx; + pthread_t toggler; + int err; + unsigned int i, loop = 10; + + ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR); + if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden")) + return; + + ctx.stop = false; + err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx); + if (!ASSERT_OK(err, "new toggler")) + goto out; + + /* Make toggler thread to run */ + usleep(1); + + for (i = 0; i < loop; i++) { + struct test_subprogs *skel = test_subprogs__open_and_load(); + + if (!ASSERT_OK_PTR(skel, "skel open")) + break; + test_subprogs__destroy(skel); + } + + ctx.stop = true; + pthread_join(toggler, NULL); +out: + close(ctx.fd); +} + +static void test_subprogs_alone(void) +{ + struct test_subprogs *skel; + struct test_subprogs_unused *skel2; + int err; + + skel = test_subprogs__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + err = test_subprogs__attach(skel); + if (!ASSERT_OK(err, "skel attach")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->res1, 12, "res1"); + ASSERT_EQ(skel->bss->res2, 17, "res2"); + ASSERT_EQ(skel->bss->res3, 19, "res3"); + ASSERT_EQ(skel->bss->res4, 36, "res4"); + + skel2 = test_subprogs_unused__open_and_load(); + ASSERT_OK_PTR(skel2, "unused_progs_skel"); + test_subprogs_unused__destroy(skel2); + +cleanup: + test_subprogs__destroy(skel); +} + +void test_subprogs(void) +{ + if (test__start_subtest("subprogs_alone")) + test_subprogs_alone(); + if (test__start_subtest("subprogs_and_jit_harden")) + test_subprogs_with_jit_harden_toggling(); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c new file mode 100644 index 000000000..9c31b7004 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include <test_progs.h> +#include "test_subskeleton.skel.h" +#include "test_subskeleton_lib.subskel.h" + +static void subskeleton_lib_setup(struct bpf_object *obj) +{ + struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); + + if (!ASSERT_OK_PTR(lib, "open subskeleton")) + return; + + *lib->rodata.var1 = 1; + *lib->data.var2 = 2; + lib->bss.var3->var3_1 = 3; + lib->bss.var3->var3_2 = 4; + + test_subskeleton_lib__destroy(lib); +} + +static int subskeleton_lib_subresult(struct bpf_object *obj) +{ + struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj); + int result; + + if (!ASSERT_OK_PTR(lib, "open subskeleton")) + return -EINVAL; + + result = *lib->bss.libout1; + ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult"); + + ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler"); + ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler), + "lib_perf_handler", "program name"); + + ASSERT_OK_PTR(lib->maps.map1, "map1"); + ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name"); + + ASSERT_EQ(*lib->data.var5, 5, "__weak var5"); + ASSERT_EQ(*lib->data.var6, 6, "extern var6"); + ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL"); + + test_subskeleton_lib__destroy(lib); + return result; +} + +void test_subskeleton(void) +{ + int err, result; + struct test_subskeleton *skel; + + skel = test_subskeleton__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->rodata->rovar1 = 10; + skel->rodata->var1 = 1; + subskeleton_lib_setup(skel->obj); + + err = test_subskeleton__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + err = test_subskeleton__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + result = subskeleton_lib_subresult(skel->obj) * 10; + ASSERT_EQ(skel->bss->out1, result, "unexpected calculation"); + +cleanup: + test_subskeleton__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c new file mode 100644 index 000000000..f4d400011 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/syscall.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "syscall.skel.h" + +struct args { + __u64 log_buf; + __u32 log_size; + int max_entries; + int map_fd; + int prog_fd; + int btf_fd; +}; + +void test_syscall(void) +{ + static char verifier_log[8192]; + struct args ctx = { + .max_entries = 1024, + .log_buf = (uintptr_t) verifier_log, + .log_size = sizeof(verifier_log), + }; + LIBBPF_OPTS(bpf_test_run_opts, tattr, + .ctx_in = &ctx, + .ctx_size_in = sizeof(ctx), + ); + struct syscall *skel = NULL; + __u64 key = 12, value = 0; + int err, prog_fd; + + skel = syscall__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.bpf_prog); + err = bpf_prog_test_run_opts(prog_fd, &tattr); + ASSERT_EQ(err, 0, "err"); + ASSERT_EQ(tattr.retval, 1, "retval"); + ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd"); + ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd"); + ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1), + 
"verifier_log"); + + err = bpf_map_lookup_elem(ctx.map_fd, &key, &value); + ASSERT_EQ(err, 0, "map_lookup"); + ASSERT_EQ(value, 34, "map lookup value"); +cleanup: + syscall__destroy(skel); + if (ctx.prog_fd > 0) + close(ctx.prog_fd); + if (ctx.map_fd > 0) + close(ctx.map_fd); + if (ctx.btf_fd > 0) + close(ctx.btf_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c new file mode 100644 index 000000000..09c189761 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +/* test_tailcall_1 checks basic functionality by patching multiple locations + * in a single program for a single tail call slot with nop->jmp, jmp->nop + * and jmp->jmp rewrites. Also checks for nop->nop. + */ +static void test_tailcall_1(void) +{ + int err, map_fd, prog_fd, main_fd, i, j; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + char prog_name[32]; + char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + j = bpf_map__max_entries(prog_array) - 1 - i; + snprintf(prog_name, sizeof(prog_name), "classifier_%d", j); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + j = 
bpf_map__max_entries(prog_array) - 1 - i; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, j, "tailcall retval"); + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err >= 0 || errno != ENOENT)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); + } + +out: + bpf_object__close(obj); +} + +/* test_tailcall_2 checks that patching multiple programs for a single + * tail call slot works. It also jumps through several programs and tests + * the tail call limit counter. + */ +static void test_tailcall_2(void) +{ + int err, map_fd, prog_fd, main_fd, i; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + char prog_name[32]; + char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 2, "tailcall retval"); + + i = 2; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); +out: + bpf_object__close(obj); +} + +static void test_tailcall_count(const char *which) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + prog = 
bpf_object__find_program_by_name(obj, "classifier_0"); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 33, "tailcall count"); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); +out: + bpf_object__close(obj); +} + +/* test_tailcall_3 checks that the count value of the tail call limit + * enforcement matches with expectations. JIT uses direct jump. + */ +static void test_tailcall_3(void) +{ + test_tailcall_count("tailcall3.bpf.o"); +} + +/* test_tailcall_6 checks that the count value of the tail call limit + * enforcement matches with expectations. JIT uses indirect jump. + */ +static void test_tailcall_6(void) +{ + test_tailcall_count("tailcall6.bpf.o"); +} + +/* test_tailcall_4 checks that the kernel properly selects indirect jump + * for the case where the key is not known. Latter is passed via global + * data to select different targets we can compare return value of. + */ +static void test_tailcall_4(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = 
bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); + } +out: + bpf_object__close(obj); +} + +/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates + * an indirect jump when the keys are const but different from different branches. + */ +static void test_tailcall_5(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 }; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + static const int zero = 0; + char buff[128] = {}; + char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj, + &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); + } + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); + } +out: + bpf_object__close(obj); +} + +/* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working + * correctly in correlation with BPF subprograms + */ +static void test_tailcall_bpf2bpf_1(void) +{ + int err, map_fd, prog_fd, main_fd, i; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if 
(CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + /* nop -> jmp */ + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + /* jmp -> nop, call subprog that will do tailcall */ + i = 1; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); + + /* make sure that subprog can access ctx and entry prog that + * called this subprog can properly return + */ + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval"); +out: + bpf_object__close(obj); +} + +/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit + * enforcement matches with expectations when tailcall is preceded with + * bpf2bpf call. + */ +static void test_tailcall_bpf2bpf_2(void) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + prog = bpf_object__find_program_by_name(obj, "classifier_0"); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 33, "tailcall count"); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); +out: + bpf_object__close(obj); +} + +/* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to + * 256 bytes) can be used within bpf subprograms that have the tailcalls + * in them + */ +static void 
test_tailcall_bpf2bpf_3(void) +{ + int err, map_fd, prog_fd, main_fd, i; + struct bpf_map *prog_array; + struct bpf_program *prog; + struct bpf_object *obj; + char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval"); + + i = 1; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval"); + + i = 0; + err = bpf_map_delete_elem(map_fd, &i); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval"); +out: + bpf_object__close(obj); +} + +#include "tailcall_bpf2bpf4.skel.h" + +/* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved + * across tailcalls combined with bpf2bpf calls. for making sure that tailcall + * counter behaves correctly, bpf program will go through following flow: + * + * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 -> + * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 -> + * subprog2 [here bump global counter] --------^ + * + * We go through first two tailcalls and start counting from the subprog2 where + * the loop begins. At the end of the test make sure that the global counter is + * equal to 31, because tailcall counter includes the first two tailcalls + * whereas global counter is incremented only on loop presented on flow above. + * + * The noise parameter is used to insert bpf_map_update calls into the logic + * to force verifier to patch instructions. This allows us to ensure jump + * logic remains correct with instruction movement. 
+ */ +static void test_tailcall_bpf2bpf_4(bool noise) +{ + int err, map_fd, prog_fd, main_fd, data_fd, i; + struct tailcall_bpf2bpf4__bss val; + struct bpf_map *prog_array, *data_map; + struct bpf_program *prog; + struct bpf_object *obj; + char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + prog = bpf_object__find_program_by_name(obj, "entry"); + if (CHECK_FAIL(!prog)) + goto out; + + main_fd = bpf_program__fd(prog); + if (CHECK_FAIL(main_fd < 0)) + goto out; + + prog_array = bpf_object__find_map_by_name(obj, "jmp_table"); + if (CHECK_FAIL(!prog_array)) + goto out; + + map_fd = bpf_map__fd(prog_array); + if (CHECK_FAIL(map_fd < 0)) + goto out; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); + + prog = bpf_object__find_program_by_name(obj, prog_name); + if (CHECK_FAIL(!prog)) + goto out; + + prog_fd = bpf_program__fd(prog); + if (CHECK_FAIL(prog_fd < 0)) + goto out; + + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + } + + data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); + if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) + goto out; + + data_fd = bpf_map__fd(data_map); + if (CHECK_FAIL(data_fd < 0)) + goto out; + + i = 0; + val.noise = noise; + val.count = 0; + err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY); + if (CHECK_FAIL(err)) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval"); + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val.count, 31, "tailcall count"); + +out: + bpf_object__close(obj); +} + +#include "tailcall_bpf2bpf6.skel.h" + +/* Tail call counting works even when there is data on stack which is + * not aligned to 8 bytes. 
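+ * (On JITs that keep the tail call counter in a stack slot, the slot's offset
+ * depends on the subprog's stack size, so a frame that is not a multiple of
+ * 8 bytes is the interesting case to exercise here.)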
+ */ +static void test_tailcall_bpf2bpf_6(void) +{ + struct tailcall_bpf2bpf6 *obj; + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + obj = tailcall_bpf2bpf6__open_and_load(); + if (!ASSERT_OK_PTR(obj, "open and load")) + return; + + main_fd = bpf_program__fd(obj->progs.entry); + if (!ASSERT_GE(main_fd, 0, "entry prog fd")) + goto out; + + map_fd = bpf_map__fd(obj->maps.jmp_table); + if (!ASSERT_GE(map_fd, 0, "jmp_table map fd")) + goto out; + + prog_fd = bpf_program__fd(obj->progs.classifier_0); + if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd")) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "jmp_table map update")) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "entry prog test run"); + ASSERT_EQ(topts.retval, 0, "tailcall retval"); + + data_fd = bpf_map__fd(obj->maps.bss); + if (!ASSERT_GE(data_fd, 0, "bss map fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "bss map lookup"); + ASSERT_EQ(val, 1, "done flag is set"); + +out: + tailcall_bpf2bpf6__destroy(obj); +} + +void test_tailcalls(void) +{ + if (test__start_subtest("tailcall_1")) + test_tailcall_1(); + if (test__start_subtest("tailcall_2")) + test_tailcall_2(); + if (test__start_subtest("tailcall_3")) + test_tailcall_3(); + if (test__start_subtest("tailcall_4")) + test_tailcall_4(); + if (test__start_subtest("tailcall_5")) + test_tailcall_5(); + if (test__start_subtest("tailcall_6")) + test_tailcall_6(); + if (test__start_subtest("tailcall_bpf2bpf_1")) + test_tailcall_bpf2bpf_1(); + if (test__start_subtest("tailcall_bpf2bpf_2")) + test_tailcall_bpf2bpf_2(); + if (test__start_subtest("tailcall_bpf2bpf_3")) + test_tailcall_bpf2bpf_3(); + if (test__start_subtest("tailcall_bpf2bpf_4")) + test_tailcall_bpf2bpf_4(false); + if (test__start_subtest("tailcall_bpf2bpf_5")) + test_tailcall_bpf2bpf_4(true); + if (test__start_subtest("tailcall_bpf2bpf_6")) + test_tailcall_bpf2bpf_6(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c new file mode 100644 index 000000000..3d34bab01 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_task_fd_query_rawtp(void) +{ + const char *file = "./test_get_stack_rawtp.bpf.o"; + __u64 probe_offset, probe_addr; + __u32 len, prog_id, fd_type; + struct bpf_object *obj; + int efd, err, prog_fd; + __u32 duration = 0; + char buf[256]; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + efd = bpf_raw_tracepoint_open("sys_enter", prog_fd); + if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + /* query (getpid(), efd) */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, + errno)) + goto close_prog; + + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + strcmp(buf, "sys_enter") == 0; + if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n", + fd_type, buf)) + goto close_prog; + + /* test zero len */ + len = 0; + err = bpf_task_fd_query(getpid(), efd, 0, buf, 
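+	/* Note: len is an in/out argument - it carries the buffer size in and
+	 * the tracepoint name length back out, which the zero-len and
+	 * small-buffer cases below rely on.
+	 */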
&len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n", + err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter"); + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + + /* test empty buffer */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n", + err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter"); + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + + /* test smaller buffer */ + len = 3; + err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)", + "err %d errno %d\n", err, errno)) + goto close_prog; + err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT && + len == strlen("sys_enter") && + strcmp(buf, "sy") == 0; + if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) + goto close_prog; + +close_prog: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c new file mode 100644 index 000000000..c717741bf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +static void test_task_fd_query_tp_core(const char *probe_name, + const char *tp_name) +{ + const char *file = "./test_tracepoint.bpf.o"; + int err, bytes, efd, prog_fd, pmu_fd; + struct perf_event_attr attr = {}; + __u64 probe_offset, probe_addr; + __u32 len, prog_id, fd_type; + struct bpf_object *obj = NULL; + __u32 duration = 0; + char buf[256]; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) + goto close_prog; + + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/%s/id", probe_name); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", + "bytes %d errno %d\n", bytes, errno)) + goto close_prog; + + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + /* query (getpid(), pmu_fd) */ + len = sizeof(buf); + err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id, + &fd_type, &probe_offset, &probe_addr); + if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err, + errno)) + goto close_pmu; + + err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name); + if (CHECK(!err, "check_results", 
"fd_type %d tp_name %s\n", + fd_type, buf)) + goto close_pmu; + +close_pmu: + close(pmu_fd); +close_prog: + bpf_object__close(obj); +} + +void test_task_fd_query_tp(void) +{ + test_task_fd_query_tp_core("sched/sched_switch", + "sched_switch"); + test_task_fd_query_tp_core("syscalls/sys_enter_read", + "sys_enter_read"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c new file mode 100644 index 000000000..035c263aa --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#define _GNU_SOURCE /* See feature_test_macros(7) */ +#include <unistd.h> +#include <sys/syscall.h> /* For SYS_xxx definitions */ +#include <sys/types.h> +#include <test_progs.h> +#include "task_local_storage.skel.h" +#include "task_local_storage_exit_creds.skel.h" +#include "task_ls_recursion.skel.h" + +static void test_sys_enter_exit(void) +{ + struct task_local_storage *skel; + int err; + + skel = task_local_storage__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + skel->bss->target_pid = syscall(SYS_gettid); + + err = task_local_storage__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + syscall(SYS_gettid); + syscall(SYS_gettid); + + /* 3x syscalls: 1x attach and 2x gettid */ + ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); + ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt"); + ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt"); +out: + task_local_storage__destroy(skel); +} + +static void test_exit_creds(void) +{ + struct task_local_storage_exit_creds *skel; + int err; + + skel = task_local_storage_exit_creds__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = task_local_storage_exit_creds__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* trigger at least one exit_creds() */ + if (CHECK_FAIL(system("ls > /dev/null"))) + goto out; + + /* sync rcu to make sure exit_creds() is called for "ls" */ + kern_sync_rcu(); + ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count"); + ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count"); +out: + task_local_storage_exit_creds__destroy(skel); +} + +static void test_recursion(void) +{ + struct task_ls_recursion *skel; + int err; + + skel = task_ls_recursion__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = task_ls_recursion__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + /* trigger sys_enter, make sure it does not cause deadlock */ + syscall(SYS_gettid); + +out: + task_ls_recursion__destroy(skel); +} + +void test_task_local_storage(void) +{ + if (test__start_subtest("sys_enter_exit")) + test_sys_enter_exit(); + if (test__start_subtest("exit_creds")) + test_exit_creds(); + if (test__start_subtest("recursion")) + test_recursion(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c new file mode 100644 index 000000000..f000734a3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE +#include <test_progs.h> +#include "test_task_pt_regs.skel.h" + +/* uprobe attach point */ +static noinline void trigger_func(void) +{ + asm volatile (""); +} + +void test_task_pt_regs(void) +{ + struct test_task_pt_regs *skel; + struct bpf_link *uprobe_link; + ssize_t 
uprobe_offset; + bool match; + + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + return; + + skel = test_task_pt_regs__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) + goto cleanup; + + uprobe_link = bpf_program__attach_uprobe(skel->progs.handle_uprobe, + false /* retprobe */, + 0 /* self pid */, + "/proc/self/exe", + uprobe_offset); + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe")) + goto cleanup; + skel->links.handle_uprobe = uprobe_link; + + /* trigger & validate uprobe */ + trigger_func(); + + if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res")) + goto cleanup; + + match = !memcmp(&skel->bss->current_regs, &skel->bss->ctx_regs, + sizeof(skel->bss->current_regs)); + ASSERT_TRUE(match, "check_regs_match"); + +cleanup: + test_task_pt_regs__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c new file mode 100644 index 000000000..4a505a5ad --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c @@ -0,0 +1,395 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include <linux/pkt_cls.h> + +#include "test_tc_bpf.skel.h" + +#define LO_IFINDEX 1 + +#define TEST_DECLARE_OPTS(__fd) \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \ + .flags = BPF_TC_F_REPLACE); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \ + .prog_id = 42); \ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1); + +static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd); + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + int ret; + + ret = bpf_obj_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd")) + return ret; + + ret = bpf_tc_attach(hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + return ret; + + if (!ASSERT_EQ(opts.handle, 1, "handle set") || + !ASSERT_EQ(opts.priority, 1, "priority set") || + !ASSERT_EQ(opts.prog_id, info.id, "prog_id set")) + goto end; + + opts.prog_id = 0; + opts.flags = BPF_TC_F_REPLACE; + ret = bpf_tc_attach(hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach replace mode")) + goto end; + + opts.flags = opts.prog_fd = opts.prog_id = 0; + ret = bpf_tc_query(hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_query")) + goto end; + + if (!ASSERT_EQ(opts.handle, 1, "handle set") || + !ASSERT_EQ(opts.priority, 1, "priority set") || + !ASSERT_EQ(opts.prog_id, info.id, "prog_id set")) + goto end; + +end: + opts.flags = opts.prog_fd = opts.prog_id = 0; + ret = bpf_tc_detach(hook, &opts); + ASSERT_OK(ret, "bpf_tc_detach"); + return ret; +} + +static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd) +{ + 
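+	/* For reference, the successful call sequence (a sketch based on
+	 * test_tc_bpf_basic() above, not executed here) looks like:
+	 *
+	 *	DECLARE_LIBBPF_OPTS(bpf_tc_opts, o, .handle = 1, .priority = 1,
+	 *			    .prog_fd = fd);
+	 *	bpf_tc_hook_create(hook);
+	 *	bpf_tc_attach(hook, &o);
+	 *	o.flags = o.prog_fd = o.prog_id = 0;	(query/detach want these clear)
+	 *	bpf_tc_query(hook, &o);
+	 *	bpf_tc_detach(hook, &o);
+	 *	bpf_tc_hook_destroy(hook);
+	 *
+	 * Everything below instead feeds deliberately invalid hooks/opts to the
+	 * same APIs and expects -EINVAL (or -EOPNOTSUPP where noted).
+	 */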
DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd); + DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1); + int ret; + + ret = bpf_tc_hook_create(NULL); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL")) + return -EINVAL; + + /* hook ifindex = 0 */ + ret = bpf_tc_hook_create(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0")) + return -EINVAL; + + ret = bpf_tc_hook_destroy(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0")) + return -EINVAL; + + ret = bpf_tc_attach(&inv_hook, &attach_opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0")) + return -EINVAL; + attach_opts.prog_id = 0; + + ret = bpf_tc_detach(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0")) + return -EINVAL; + + ret = bpf_tc_query(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0")) + return -EINVAL; + + /* hook ifindex < 0 */ + inv_hook.ifindex = -1; + + ret = bpf_tc_hook_create(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0")) + return -EINVAL; + + ret = bpf_tc_hook_destroy(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0")) + return -EINVAL; + + ret = bpf_tc_attach(&inv_hook, &attach_opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0")) + return -EINVAL; + attach_opts.prog_id = 0; + + ret = bpf_tc_detach(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0")) + return -EINVAL; + + ret = bpf_tc_query(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0")) + return -EINVAL; + + inv_hook.ifindex = LO_IFINDEX; + + /* hook.attach_point invalid */ + inv_hook.attach_point = 0xabcd; + ret = bpf_tc_hook_create(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point")) + return -EINVAL; + + ret = bpf_tc_hook_destroy(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point")) + return -EINVAL; + + ret = bpf_tc_attach(&inv_hook, &attach_opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point")) + return -EINVAL; + + ret = bpf_tc_detach(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point")) + return -EINVAL; + + ret = bpf_tc_query(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point")) + return -EINVAL; + + inv_hook.attach_point = BPF_TC_INGRESS; + + /* hook.attach_point valid, but parent invalid */ + inv_hook.parent = TC_H_MAKE(1UL << 16, 10); + ret = bpf_tc_hook_create(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_hook_destroy(&inv_hook); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_attach(&inv_hook, &attach_opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_detach(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_query(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent")) + return -EINVAL; + + inv_hook.attach_point = BPF_TC_CUSTOM; + inv_hook.parent = 
0; + /* These return EOPNOTSUPP instead of EINVAL as parent is checked after + * attach_point of the hook. + */ + ret = bpf_tc_hook_create(&inv_hook); + if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_hook_destroy(&inv_hook); + if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_attach(&inv_hook, &attach_opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_detach(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent")) + return -EINVAL; + + ret = bpf_tc_query(&inv_hook, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent")) + return -EINVAL; + + inv_hook.attach_point = BPF_TC_INGRESS; + + /* detach */ + { + TEST_DECLARE_OPTS(fd); + + ret = bpf_tc_detach(NULL, &opts_hp); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL")) + return -EINVAL; + + ret = bpf_tc_detach(hook, NULL); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_hpr); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_hpf); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_hpi); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_p); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_h); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset")) + return -EINVAL; + + ret = bpf_tc_detach(hook, &opts_prio_max); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX")) + return -EINVAL; + } + + /* query */ + { + TEST_DECLARE_OPTS(fd); + + ret = bpf_tc_query(NULL, &opts); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL")) + return -EINVAL; + + ret = bpf_tc_query(hook, NULL); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_hpr); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_hpf); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_hpi); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_p); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_h); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset")) + return -EINVAL; + + ret = bpf_tc_query(hook, &opts_prio_max); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX")) + return -EINVAL; + + /* when chain is not present, kernel returns -EINVAL */ + ret = bpf_tc_query(hook, &opts_hp); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set")) + return -EINVAL; + } + + /* attach */ + { + TEST_DECLARE_OPTS(fd); + + ret = bpf_tc_attach(NULL, &opts_hp); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL")) + return -EINVAL; + + ret = bpf_tc_attach(hook, NULL); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL")) + return -EINVAL; + + opts_hp.flags = 42; + ret = bpf_tc_attach(hook, &opts_hp); + if 
(!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags")) + return -EINVAL; + + ret = bpf_tc_attach(hook, NULL); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset")) + return -EINVAL; + + ret = bpf_tc_attach(hook, &opts_hpi); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set")) + return -EINVAL; + + ret = bpf_tc_attach(hook, &opts_pf); + if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset")) + return -EINVAL; + opts_pf.prog_fd = opts_pf.prog_id = 0; + ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach"); + + ret = bpf_tc_attach(hook, &opts_hf); + if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset")) + return -EINVAL; + opts_hf.prog_fd = opts_hf.prog_id = 0; + ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach"); + + ret = bpf_tc_attach(hook, &opts_prio_max); + if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX")) + return -EINVAL; + + ret = bpf_tc_attach(hook, &opts_f); + if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset")) + return -EINVAL; + opts_f.prog_fd = opts_f.prog_id = 0; + ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach"); + } + + return 0; +} + +void test_tc_bpf(void) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX, + .attach_point = BPF_TC_INGRESS); + struct test_tc_bpf *skel = NULL; + bool hook_created = false; + int cls_fd, ret; + + skel = test_tc_bpf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load")) + return; + + cls_fd = bpf_program__fd(skel->progs.cls); + + ret = bpf_tc_hook_create(&hook); + if (ret == 0) + hook_created = true; + + ret = ret == -EEXIST ? 0 : ret; + if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)")) + goto end; + + hook.attach_point = BPF_TC_CUSTOM; + hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS); + ret = bpf_tc_hook_create(&hook); + if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point")) + goto end; + + ret = test_tc_bpf_basic(&hook, cls_fd); + if (!ASSERT_OK(ret, "test_tc_internal ingress")) + goto end; + + ret = bpf_tc_hook_destroy(&hook); + if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point")) + goto end; + + hook.attach_point = BPF_TC_INGRESS; + hook.parent = 0; + bpf_tc_hook_destroy(&hook); + + ret = test_tc_bpf_basic(&hook, cls_fd); + if (!ASSERT_OK(ret, "test_tc_internal ingress")) + goto end; + + bpf_tc_hook_destroy(&hook); + + hook.attach_point = BPF_TC_EGRESS; + ret = test_tc_bpf_basic(&hook, cls_fd); + if (!ASSERT_OK(ret, "test_tc_internal egress")) + goto end; + + bpf_tc_hook_destroy(&hook); + + ret = test_tc_bpf_api(&hook, cls_fd); + if (!ASSERT_OK(ret, "test_tc_bpf_api")) + goto end; + + bpf_tc_hook_destroy(&hook); + +end: + if (hook_created) { + hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&hook); + } + test_tc_bpf__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c new file mode 100644 index 000000000..cb6a53b3e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -0,0 +1,1149 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link + * between src and dst. The netns fwd has veth links to each src and dst. The + * client is in src and server in dst. 
The test installs a TC BPF program to each + * host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the + * neigh addr population and redirect or ii) bpf_redirect_peer() for namespace + * switch from ingress side; it also installs a checker prog on the egress side + * to drop unexpected traffic. + */ + +#include <arpa/inet.h> +#include <linux/if.h> +#include <linux/if_tun.h> +#include <linux/limits.h> +#include <linux/sysctl.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h> +#include <stdbool.h> +#include <stdio.h> +#include <sys/stat.h> +#include <unistd.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "test_tc_neigh_fib.skel.h" +#include "test_tc_neigh.skel.h" +#include "test_tc_peer.skel.h" +#include "test_tc_dtime.skel.h" + +#ifndef TCP_TX_DELAY +#define TCP_TX_DELAY 37 +#endif + +#define NS_SRC "ns_src" +#define NS_FWD "ns_fwd" +#define NS_DST "ns_dst" + +#define IP4_SRC "172.16.1.100" +#define IP4_DST "172.16.2.100" +#define IP4_TUN_SRC "172.17.1.100" +#define IP4_TUN_FWD "172.17.1.200" +#define IP4_PORT 9004 + +#define IP6_SRC "0::1:dead:beef:cafe" +#define IP6_DST "0::2:dead:beef:cafe" +#define IP6_TUN_SRC "1::1:dead:beef:cafe" +#define IP6_TUN_FWD "1::2:dead:beef:cafe" +#define IP6_PORT 9006 + +#define IP4_SLL "169.254.0.1" +#define IP4_DLL "169.254.0.2" +#define IP4_NET "169.254.0.0" + +#define MAC_DST_FWD "00:11:22:33:44:55" +#define MAC_DST "00:22:33:44:55:66" + +#define IFADDR_STR_LEN 18 +#define PING_ARGS "-i 0.2 -c 3 -w 10 -q" + +#define SRC_PROG_PIN_FILE "/sys/fs/bpf/test_tc_src" +#define DST_PROG_PIN_FILE "/sys/fs/bpf/test_tc_dst" +#define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk" + +#define TIMEOUT_MILLIS 10000 +#define NSEC_PER_SEC 1000000000ULL + +#define log_err(MSG, ...) 
\ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) + +static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL}; + +static int write_file(const char *path, const char *newval) +{ + FILE *f; + + f = fopen(path, "r+"); + if (!f) + return -1; + if (fwrite(newval, strlen(newval), 1, f) != 1) { + log_err("writing to %s failed", path); + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +static int netns_setup_namespaces(const char *verb) +{ + const char * const *ns = namespaces; + char cmd[128]; + + while (*ns) { + snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns); + if (!ASSERT_OK(system(cmd), cmd)) + return -1; + ns++; + } + return 0; +} + +static void netns_setup_namespaces_nofail(const char *verb) +{ + const char * const *ns = namespaces; + char cmd[128]; + + while (*ns) { + snprintf(cmd, sizeof(cmd), "ip netns %s %s > /dev/null 2>&1", verb, *ns); + system(cmd); + ns++; + } +} + +struct netns_setup_result { + int ifindex_veth_src_fwd; + int ifindex_veth_dst_fwd; +}; + +static int get_ifaddr(const char *name, char *ifaddr) +{ + char path[PATH_MAX]; + FILE *f; + int ret; + + snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name); + f = fopen(path, "r"); + if (!ASSERT_OK_PTR(f, path)) + return -1; + + ret = fread(ifaddr, 1, IFADDR_STR_LEN, f); + if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) { + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +static int get_ifindex(const char *name) +{ + char path[PATH_MAX]; + char buf[32]; + FILE *f; + int ret; + + snprintf(path, PATH_MAX, "/sys/class/net/%s/ifindex", name); + f = fopen(path, "r"); + if (!ASSERT_OK_PTR(f, path)) + return -1; + + ret = fread(buf, 1, sizeof(buf), f); + if (!ASSERT_GT(ret, 0, "fread ifindex")) { + fclose(f); + return -1; + } + fclose(f); + return atoi(buf); +} + +#define SYS(fmt, ...) 
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +static int netns_setup_links_and_routes(struct netns_setup_result *result) +{ + struct nstoken *nstoken = NULL; + char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {}; + + SYS("ip link add veth_src type veth peer name veth_src_fwd"); + SYS("ip link add veth_dst type veth peer name veth_dst_fwd"); + + SYS("ip link set veth_dst_fwd address " MAC_DST_FWD); + SYS("ip link set veth_dst address " MAC_DST); + + if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr)) + goto fail; + + result->ifindex_veth_src_fwd = get_ifindex("veth_src_fwd"); + if (result->ifindex_veth_src_fwd < 0) + goto fail; + result->ifindex_veth_dst_fwd = get_ifindex("veth_dst_fwd"); + if (result->ifindex_veth_dst_fwd < 0) + goto fail; + + SYS("ip link set veth_src netns " NS_SRC); + SYS("ip link set veth_src_fwd netns " NS_FWD); + SYS("ip link set veth_dst_fwd netns " NS_FWD); + SYS("ip link set veth_dst netns " NS_DST); + + /** setup in 'src' namespace */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto fail; + + SYS("ip addr add " IP4_SRC "/32 dev veth_src"); + SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad"); + SYS("ip link set dev veth_src up"); + + SYS("ip route add " IP4_DST "/32 dev veth_src scope global"); + SYS("ip route add " IP4_NET "/16 dev veth_src scope global"); + SYS("ip route add " IP6_DST "/128 dev veth_src scope global"); + + SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s", + veth_src_fwd_addr); + SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s", + veth_src_fwd_addr); + + close_netns(nstoken); + + /** setup in 'fwd' namespace */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto fail; + + /* The fwd netns automatically gets a v6 LL address / routes, but also + * needs v4 one in order to start ARP probing. IP4_NET route is added + * to the endpoints so that the ARP processing will reply. 
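+	 * (IP4_SLL and IP4_DLL are throwaway /32 addresses from the IP4_NET
+	 * 169.254.0.0/16 range defined above; they only exist to give the
+	 * fwd-side veths an IPv4 source address to ARP from.)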
+ */ + SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd"); + SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd"); + SYS("ip link set dev veth_src_fwd up"); + SYS("ip link set dev veth_dst_fwd up"); + + SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global"); + SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global"); + SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global"); + SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global"); + + close_netns(nstoken); + + /** setup in 'dst' namespace */ + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + goto fail; + + SYS("ip addr add " IP4_DST "/32 dev veth_dst"); + SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad"); + SYS("ip link set dev veth_dst up"); + + SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global"); + SYS("ip route add " IP4_NET "/16 dev veth_dst scope global"); + SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global"); + + SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD); + + close_netns(nstoken); + + return 0; +fail: + if (nstoken) + close_netns(nstoken); + return -1; +} + +static int netns_load_bpf(void) +{ + SYS("tc qdisc add dev veth_src_fwd clsact"); + SYS("tc filter add dev veth_src_fwd ingress bpf da object-pinned " + SRC_PROG_PIN_FILE); + SYS("tc filter add dev veth_src_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned " + DST_PROG_PIN_FILE); + SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + return 0; +fail: + return -1; +} + +static void test_tcp(int family, const char *addr, __u16 port) +{ + int listen_fd = -1, accept_fd = -1, client_fd = -1; + char buf[] = "testing testing"; + int n; + struct nstoken *nstoken; + + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + return; + + listen_fd = start_server(family, SOCK_STREAM, addr, port, 0); + if (!ASSERT_GE(listen_fd, 0, "listen")) + goto done; + + close_netns(nstoken); + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + + client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS); + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto done; + + accept_fd = accept(listen_fd, NULL, NULL); + if (!ASSERT_GE(accept_fd, 0, "accept")) + goto done; + + if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo")) + goto done; + + n = write(client_fd, buf, sizeof(buf)); + if (!ASSERT_EQ(n, sizeof(buf), "send to server")) + goto done; + + n = read(accept_fd, buf, sizeof(buf)); + ASSERT_EQ(n, sizeof(buf), "recv from server"); + +done: + if (nstoken) + close_netns(nstoken); + if (listen_fd >= 0) + close(listen_fd); + if (accept_fd >= 0) + close(accept_fd); + if (client_fd >= 0) + close(client_fd); +} + +static int test_ping(int family, const char *addr) +{ + SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr); + return 0; +fail: + return -1; +} + +static void test_connectivity(void) +{ + test_tcp(AF_INET, IP4_DST, IP4_PORT); + test_ping(AF_INET, IP4_DST); + test_tcp(AF_INET6, IP6_DST, IP6_PORT); + test_ping(AF_INET6, IP6_DST); +} + +static int set_forwarding(bool enable) +{ + int err; + + err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? 
"1" : "0"); + if (!ASSERT_OK(err, "set ipv4.ip_forward=0")) + return err; + + err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0"); + if (!ASSERT_OK(err, "set ipv6.forwarding=0")) + return err; + + return 0; +} + +static void rcv_tstamp(int fd, const char *expected, size_t s) +{ + struct __kernel_timespec pkt_ts = {}; + char ctl[CMSG_SPACE(sizeof(pkt_ts))]; + struct timespec now_ts; + struct msghdr msg = {}; + __u64 now_ns, pkt_ns; + struct cmsghdr *cmsg; + struct iovec iov; + char data[32]; + int ret; + + iov.iov_base = data; + iov.iov_len = sizeof(data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &ctl; + msg.msg_controllen = sizeof(ctl); + + ret = recvmsg(fd, &msg, 0); + if (!ASSERT_EQ(ret, s, "recvmsg")) + return; + ASSERT_STRNEQ(data, expected, s, "expected rcv data"); + + cmsg = CMSG_FIRSTHDR(&msg); + if (cmsg && cmsg->cmsg_level == SOL_SOCKET && + cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) + memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts)); + + pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; + ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp"); + + ret = clock_gettime(CLOCK_REALTIME, &now_ts); + ASSERT_OK(ret, "clock_gettime"); + now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + + if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp")) + ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC, + "check rcv tstamp"); +} + +static void snd_tstamp(int fd, char *b, size_t s) +{ + struct sock_txtime opt = { .clockid = CLOCK_TAI }; + char ctl[CMSG_SPACE(sizeof(__u64))]; + struct timespec now_ts; + struct msghdr msg = {}; + struct cmsghdr *cmsg; + struct iovec iov; + __u64 now_ns; + int ret; + + ret = clock_gettime(CLOCK_TAI, &now_ts); + ASSERT_OK(ret, "clock_get_time(CLOCK_TAI)"); + now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + + iov.iov_base = b; + iov.iov_len = s; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &ctl; + msg.msg_controllen = sizeof(ctl); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_TXTIME; + cmsg->cmsg_len = CMSG_LEN(sizeof(now_ns)); + *(__u64 *)CMSG_DATA(cmsg) = now_ns; + + ret = setsockopt(fd, SOL_SOCKET, SO_TXTIME, &opt, sizeof(opt)); + ASSERT_OK(ret, "setsockopt(SO_TXTIME)"); + + ret = sendmsg(fd, &msg, 0); + ASSERT_EQ(ret, s, "sendmsg"); +} + +static void test_inet_dtime(int family, int type, const char *addr, __u16 port) +{ + int opt = 1, accept_fd = -1, client_fd = -1, listen_fd, err; + char buf[] = "testing testing"; + struct nstoken *nstoken; + + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + return; + listen_fd = start_server(family, type, addr, port, 0); + close_netns(nstoken); + + if (!ASSERT_GE(listen_fd, 0, "listen")) + return; + + /* Ensure the kernel puts the (rcv) timestamp for all skb */ + err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + &opt, sizeof(opt)); + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + goto done; + + if (type == SOCK_STREAM) { + /* Ensure the kernel set EDT when sending out rst/ack + * from the kernel's ctl_sk. 
+ */ + err = setsockopt(listen_fd, SOL_TCP, TCP_TX_DELAY, &opt, + sizeof(opt)); + if (!ASSERT_OK(err, "setsockopt(TCP_TX_DELAY)")) + goto done; + } + + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS); + close_netns(nstoken); + + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto done; + + if (type == SOCK_STREAM) { + int n; + + accept_fd = accept(listen_fd, NULL, NULL); + if (!ASSERT_GE(accept_fd, 0, "accept")) + goto done; + + n = write(client_fd, buf, sizeof(buf)); + if (!ASSERT_EQ(n, sizeof(buf), "send to server")) + goto done; + rcv_tstamp(accept_fd, buf, sizeof(buf)); + } else { + snd_tstamp(client_fd, buf, sizeof(buf)); + rcv_tstamp(listen_fd, buf, sizeof(buf)); + } + +done: + close(listen_fd); + if (accept_fd != -1) + close(accept_fd); + if (client_fd != -1) + close(client_fd); +} + +static int netns_load_dtime_bpf(struct test_tc_dtime *skel) +{ + struct nstoken *nstoken; + +#define PIN_FNAME(__file) "/sys/fs/bpf/" #__file +#define PIN(__prog) ({ \ + int err = bpf_program__pin(skel->progs.__prog, PIN_FNAME(__prog)); \ + if (!ASSERT_OK(err, "pin " #__prog)) \ + goto fail; \ + }) + + /* setup ns_src tc progs */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) + return -1; + PIN(egress_host); + PIN(ingress_host); + SYS("tc qdisc add dev veth_src clsact"); + SYS("tc filter add dev veth_src ingress bpf da object-pinned " + PIN_FNAME(ingress_host)); + SYS("tc filter add dev veth_src egress bpf da object-pinned " + PIN_FNAME(egress_host)); + close_netns(nstoken); + + /* setup ns_dst tc progs */ + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST)) + return -1; + PIN(egress_host); + PIN(ingress_host); + SYS("tc qdisc add dev veth_dst clsact"); + SYS("tc filter add dev veth_dst ingress bpf da object-pinned " + PIN_FNAME(ingress_host)); + SYS("tc filter add dev veth_dst egress bpf da object-pinned " + PIN_FNAME(egress_host)); + close_netns(nstoken); + + /* setup ns_fwd tc progs */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) + return -1; + PIN(ingress_fwdns_prio100); + PIN(egress_fwdns_prio100); + PIN(ingress_fwdns_prio101); + PIN(egress_fwdns_prio101); + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress prio 100 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio100)); + SYS("tc filter add dev veth_dst_fwd ingress prio 101 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio101)); + SYS("tc filter add dev veth_dst_fwd egress prio 100 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio100)); + SYS("tc filter add dev veth_dst_fwd egress prio 101 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio101)); + SYS("tc qdisc add dev veth_src_fwd clsact"); + SYS("tc filter add dev veth_src_fwd ingress prio 100 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio100)); + SYS("tc filter add dev veth_src_fwd ingress prio 101 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio101)); + SYS("tc filter add dev veth_src_fwd egress prio 100 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio100)); + SYS("tc filter add dev veth_src_fwd egress prio 101 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio101)); + close_netns(nstoken); + +#undef PIN + + return 0; + +fail: + close_netns(nstoken); + return -1; +} + +enum { + INGRESS_FWDNS_P100, + INGRESS_FWDNS_P101, + EGRESS_FWDNS_P100, + EGRESS_FWDNS_P101, + INGRESS_ENDHOST, + EGRESS_ENDHOST, + SET_DTIME, + __MAX_CNT, +}; + +const 
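+/* Name tables for the counter and test enums above; keep them in sync. The
+ * dtime_cnt_str()/dtime_err_str() helpers below index these directly and
+ * format into a static buffer, which is fine since each assert consumes the
+ * string immediately.
+ */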
char *cnt_names[] = { + "ingress_fwdns_p100", + "ingress_fwdns_p101", + "egress_fwdns_p100", + "egress_fwdns_p101", + "ingress_endhost", + "egress_endhost", + "set_dtime", +}; + +enum { + TCP_IP6_CLEAR_DTIME, + TCP_IP4, + TCP_IP6, + UDP_IP4, + UDP_IP6, + TCP_IP4_RT_FWD, + TCP_IP6_RT_FWD, + UDP_IP4_RT_FWD, + UDP_IP6_RT_FWD, + UKN_TEST, + __NR_TESTS, +}; + +const char *test_names[] = { + "tcp ip6 clear dtime", + "tcp ip4", + "tcp ip6", + "udp ip4", + "udp ip6", + "tcp ip4 rt fwd", + "tcp ip6 rt fwd", + "udp ip4 rt fwd", + "udp ip6 rt fwd", +}; + +static const char *dtime_cnt_str(int test, int cnt) +{ + static char name[64]; + + snprintf(name, sizeof(name), "%s %s", test_names[test], cnt_names[cnt]); + + return name; +} + +static const char *dtime_err_str(int test, int cnt) +{ + static char name[64]; + + snprintf(name, sizeof(name), "%s %s errs", test_names[test], + cnt_names[cnt]); + + return name; +} + +static void test_tcp_clear_dtime(struct test_tc_dtime *skel) +{ + int i, t = TCP_IP6_CLEAR_DTIME; + __u32 *dtimes = skel->bss->dtimes[t]; + __u32 *errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 50000 + t); + + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P101)); + ASSERT_GT(dtimes[EGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, EGRESS_FWDNS_P100)); + ASSERT_EQ(dtimes[EGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, EGRESS_FWDNS_P101)); + ASSERT_GT(dtimes[EGRESS_ENDHOST], 0, + dtime_cnt_str(t, EGRESS_ENDHOST)); + ASSERT_GT(dtimes[INGRESS_ENDHOST], 0, + dtime_cnt_str(t, INGRESS_ENDHOST)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) +{ + __u32 *dtimes, *errs; + const char *addr; + int i, t; + + if (family == AF_INET) { + t = bpf_fwd ? TCP_IP4 : TCP_IP4_RT_FWD; + addr = IP4_DST; + } else { + t = bpf_fwd ? TCP_IP6 : TCP_IP6_RT_FWD; + addr = IP6_DST; + } + + dtimes = skel->bss->dtimes[t]; + errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t); + + /* fwdns_prio100 prog does not read delivery_time_type, so + * kernel puts the (rcv) timetamp in __sk_buff->tstamp + */ + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + for (i = INGRESS_FWDNS_P101; i < SET_DTIME; i++) + ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) +{ + __u32 *dtimes, *errs; + const char *addr; + int i, t; + + if (family == AF_INET) { + t = bpf_fwd ? UDP_IP4 : UDP_IP4_RT_FWD; + addr = IP4_DST; + } else { + t = bpf_fwd ? 
UDP_IP6 : UDP_IP6_RT_FWD; + addr = IP6_DST; + } + + dtimes = skel->bss->dtimes[t]; + errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(family, SOCK_DGRAM, addr, 50000 + t); + + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + /* non mono delivery time is not forwarded */ + ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P101)); + for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++) + ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_tc_redirect_dtime(struct netns_setup_result *setup_result) +{ + struct test_tc_dtime *skel; + struct nstoken *nstoken; + int err; + + skel = test_tc_dtime__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open")) + return; + + skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_dtime__load(skel); + if (!ASSERT_OK(err, "test_tc_dtime__load")) + goto done; + + if (netns_load_dtime_bpf(skel)) + goto done; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto done; + err = set_forwarding(false); + close_netns(nstoken); + if (!ASSERT_OK(err, "disable forwarding")) + goto done; + + test_tcp_clear_dtime(skel); + + test_tcp_dtime(skel, AF_INET, true); + test_tcp_dtime(skel, AF_INET6, true); + test_udp_dtime(skel, AF_INET, true); + test_udp_dtime(skel, AF_INET6, true); + + /* Test the kernel ip[6]_forward path instead + * of bpf_redirect_neigh(). + */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto done; + err = set_forwarding(true); + close_netns(nstoken); + if (!ASSERT_OK(err, "enable forwarding")) + goto done; + + test_tcp_dtime(skel, AF_INET, false); + test_tcp_dtime(skel, AF_INET6, false); + test_udp_dtime(skel, AF_INET, false); + test_udp_dtime(skel, AF_INET6, false); + +done: + test_tc_dtime__destroy(skel); +} + +static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken = NULL; + struct test_tc_neigh_fib *skel = NULL; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_neigh_fib__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open")) + goto done; + + if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + /* bpf_fib_lookup() checks if forwarding is enabled */ + if (!ASSERT_OK(set_forwarding(true), "enable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_neigh_fib__destroy(skel); + close_netns(nstoken); +} + +static void test_tc_redirect_neigh(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken = NULL; + struct test_tc_neigh *skel = NULL; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_neigh__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open")) + goto done; + + skel->rodata->IFINDEX_SRC = 
setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_neigh__load(skel); + if (!ASSERT_OK(err, "test_tc_neigh__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_neigh__destroy(skel); + close_netns(nstoken); +} + +static void test_tc_redirect_peer(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken; + struct test_tc_peer *skel; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_peer__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_peer__open")) + goto done; + + skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_peer__load(skel); + if (!ASSERT_OK(err, "test_tc_peer__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_peer__destroy(skel); + close_netns(nstoken); +} + +static int tun_open(char *name) +{ + struct ifreq ifr; + int fd, err; + + fd = open("/dev/net/tun", O_RDWR); + if (!ASSERT_GE(fd, 0, "open /dev/net/tun")) + return -1; + + memset(&ifr, 0, sizeof(ifr)); + + ifr.ifr_flags = IFF_TUN | IFF_NO_PI; + if (*name) + strncpy(ifr.ifr_name, name, IFNAMSIZ); + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl TUNSETIFF")) + goto fail; + + SYS("ip link set dev %s up", name); + + return fd; +fail: + close(fd); + return -1; +} + +enum { + SRC_TO_TARGET = 0, + TARGET_TO_SRC = 1, +}; + +static int tun_relay_loop(int src_fd, int target_fd) +{ + fd_set rfds, wfds; + + FD_ZERO(&rfds); + FD_ZERO(&wfds); + + for (;;) { + char buf[1500]; + int direction, nread, nwrite; + + FD_SET(src_fd, &rfds); + FD_SET(target_fd, &rfds); + + if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) { + log_err("select failed"); + return 1; + } + + direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC; + + nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf)); + if (nread < 0) { + log_err("read failed"); + return 1; + } + + nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread); + if (nwrite != nread) { + log_err("write failed"); + return 1; + } + } +} + +static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) +{ + struct test_tc_peer *skel = NULL; + struct nstoken *nstoken = NULL; + int err; + int tunnel_pid = -1; + int src_fd, target_fd = -1; + int ifindex; + + /* Start a L3 TUN/TAP tunnel between the src and dst namespaces. 
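+	 * (The two tun fds are bridged in userspace by tun_relay_loop() above,
+	 * which is forked off below so that packets actually flow between the
+	 * namespaces.)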
+ * This test is using TUN/TAP instead of e.g. IPIP or GRE tunnel as those + * expose the L2 headers encapsulating the IP packet to BPF and hence + * don't have skb in suitable state for this test. Alternative to TUN/TAP + * would be e.g. Wireguard which would appear as a pure L3 device to BPF, + * but that requires much more complicated setup. + */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) + return; + + src_fd = tun_open("tun_src"); + if (!ASSERT_GE(src_fd, 0, "tun_open tun_src")) + goto fail; + + close_netns(nstoken); + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) + goto fail; + + target_fd = tun_open("tun_fwd"); + if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd")) + goto fail; + + tunnel_pid = fork(); + if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop")) + goto fail; + + if (tunnel_pid == 0) + exit(tun_relay_loop(src_fd, target_fd)); + + skel = test_tc_peer__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_peer__open")) + goto fail; + + ifindex = get_ifindex("tun_fwd"); + if (!ASSERT_GE(ifindex, 0, "get_ifindex tun_fwd")) + goto fail; + + skel->rodata->IFINDEX_SRC = ifindex; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_peer__load(skel); + if (!ASSERT_OK(err, "test_tc_peer__load")) + goto fail; + + err = bpf_program__pin(skel->progs.tc_src_l3, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto fail; + + err = bpf_program__pin(skel->progs.tc_dst_l3, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto fail; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto fail; + + /* Load "tc_src_l3" to the tun_fwd interface to redirect packets + * towards dst, and "tc_dst" to redirect packets + * and "tc_chk" on veth_dst_fwd to drop non-redirected packets. 
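+	 * The *_l3 flavours of the programs are used here because tun_fwd is an
+	 * L3 device: packets carry no Ethernet header, so the redirect programs
+	 * must not assume one is present.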
+ */ + SYS("tc qdisc add dev tun_fwd clsact"); + SYS("tc filter add dev tun_fwd ingress bpf da object-pinned " + SRC_PROG_PIN_FILE); + + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned " + DST_PROG_PIN_FILE); + SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + /* Setup route and neigh tables */ + SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24"); + SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24"); + + SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad"); + SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad"); + + SYS("ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global"); + SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD + " dev tun_src scope global"); + SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global"); + SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global"); + SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD + " dev tun_src scope global"); + SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global"); + + SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + + if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto fail; + + test_connectivity(); + +fail: + if (tunnel_pid > 0) { + kill(tunnel_pid, SIGTERM); + waitpid(tunnel_pid, NULL, 0); + } + if (src_fd >= 0) + close(src_fd); + if (target_fd >= 0) + close(target_fd); + if (skel) + test_tc_peer__destroy(skel); + if (nstoken) + close_netns(nstoken); +} + +#define RUN_TEST(name) \ + ({ \ + struct netns_setup_result setup_result; \ + if (test__start_subtest(#name)) \ + if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \ + if (ASSERT_OK(netns_setup_links_and_routes(&setup_result), \ + "setup links and routes")) \ + test_ ## name(&setup_result); \ + netns_setup_namespaces("delete"); \ + } \ + }) + +static void *test_tc_redirect_run_tests(void *arg) +{ + netns_setup_namespaces_nofail("delete"); + + RUN_TEST(tc_redirect_peer); + RUN_TEST(tc_redirect_peer_l3); + RUN_TEST(tc_redirect_neigh); + RUN_TEST(tc_redirect_neigh_fib); + RUN_TEST(tc_redirect_dtime); + return NULL; +} + +void serial_test_tc_redirect(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c new file mode 100644 index 000000000..e070bca2b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_tcp_estats(void) +{ + const char *file = "./test_tcp_estats.bpf.o"; + int err, prog_fd; + struct bpf_object *obj; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (!ASSERT_OK(err, "")) + return; + + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c new file mode 100644 index 000000000..571917735 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#define _GNU_SOURCE +#include <sched.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/socket.h> +#include <linux/compiler.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_tcp_hdr_options.h" +#include "test_tcp_hdr_options.skel.h" +#include "test_misc_tcp_hdr_options.skel.h" + +#define LO_ADDR6 "::1" +#define CG_NAME "/tcpbpf-hdr-opt-test" + +static struct bpf_test_option exp_passive_estab_in; +static struct bpf_test_option exp_active_estab_in; +static struct bpf_test_option exp_passive_fin_in; +static struct bpf_test_option exp_active_fin_in; +static struct hdr_stg exp_passive_hdr_stg; +static struct hdr_stg exp_active_hdr_stg = { .active = true, }; + +static struct test_misc_tcp_hdr_options *misc_skel; +static struct test_tcp_hdr_options *skel; +static int lport_linum_map_fd; +static int hdr_stg_map_fd; +static __u32 duration; +static int cg_fd; + +struct sk_fds { + int srv_fd; + int passive_fd; + int active_fd; + int passive_lport; + int active_lport; +}; + +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "run ip cmd")) + return -1; + + return 0; +} + +static void print_hdr_stg(const struct hdr_stg *hdr_stg, const char *prefix) +{ + fprintf(stderr, "%s{active:%u, resend_syn:%u, syncookie:%u, fastopen:%u}\n", + prefix ? : "", hdr_stg->active, hdr_stg->resend_syn, + hdr_stg->syncookie, hdr_stg->fastopen); +} + +static void print_option(const struct bpf_test_option *opt, const char *prefix) +{ + fprintf(stderr, "%s{flags:0x%x, max_delack_ms:%u, rand:0x%x}\n", + prefix ? 
: "", opt->flags, opt->max_delack_ms, opt->rand); +} + +static void sk_fds_close(struct sk_fds *sk_fds) +{ + close(sk_fds->srv_fd); + close(sk_fds->passive_fd); + close(sk_fds->active_fd); +} + +static int sk_fds_shutdown(struct sk_fds *sk_fds) +{ + int ret, abyte; + + shutdown(sk_fds->active_fd, SHUT_WR); + ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte)); + if (!ASSERT_EQ(ret, 0, "read-after-shutdown(passive_fd):")) + return -1; + + shutdown(sk_fds->passive_fd, SHUT_WR); + ret = read(sk_fds->active_fd, &abyte, sizeof(abyte)); + if (!ASSERT_EQ(ret, 0, "read-after-shutdown(active_fd):")) + return -1; + + return 0; +} + +static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open) +{ + const char fast[] = "FAST!!!"; + struct sockaddr_in6 addr6; + socklen_t len; + + sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0); + if (!ASSERT_NEQ(sk_fds->srv_fd, -1, "start_server")) + goto error; + + if (fast_open) + sk_fds->active_fd = fastopen_connect(sk_fds->srv_fd, fast, + sizeof(fast), 0); + else + sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0); + + if (!ASSERT_NEQ(sk_fds->active_fd, -1, "")) { + close(sk_fds->srv_fd); + goto error; + } + + len = sizeof(addr6); + if (!ASSERT_OK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6, + &len), "getsockname(srv_fd)")) + goto error_close; + sk_fds->passive_lport = ntohs(addr6.sin6_port); + + len = sizeof(addr6); + if (!ASSERT_OK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6, + &len), "getsockname(active_fd)")) + goto error_close; + sk_fds->active_lport = ntohs(addr6.sin6_port); + + sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0); + if (!ASSERT_NEQ(sk_fds->passive_fd, -1, "accept(srv_fd)")) + goto error_close; + + if (fast_open) { + char bytes_in[sizeof(fast)]; + int ret; + + ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in)); + if (!ASSERT_EQ(ret, sizeof(fast), "read fastopen syn data")) { + close(sk_fds->passive_fd); + goto error_close; + } + } + + return 0; + +error_close: + close(sk_fds->active_fd); + close(sk_fds->srv_fd); + +error: + memset(sk_fds, -1, sizeof(*sk_fds)); + return -1; +} + +static int check_hdr_opt(const struct bpf_test_option *exp, + const struct bpf_test_option *act, + const char *hdr_desc) +{ + if (!ASSERT_OK(memcmp(exp, act, sizeof(*exp)), hdr_desc)) { + print_option(exp, "expected: "); + print_option(act, " actual: "); + return -1; + } + + return 0; +} + +static int check_hdr_stg(const struct hdr_stg *exp, int fd, + const char *stg_desc) +{ + struct hdr_stg act; + + if (!ASSERT_OK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act), + "map_lookup(hdr_stg_map_fd)")) + return -1; + + if (!ASSERT_OK(memcmp(exp, &act, sizeof(*exp)), stg_desc)) { + print_hdr_stg(exp, "expected: "); + print_hdr_stg(&act, " actual: "); + return -1; + } + + return 0; +} + +static int check_error_linum(const struct sk_fds *sk_fds) +{ + unsigned int nr_errors = 0; + struct linum_err linum_err; + int lport; + + lport = sk_fds->passive_lport; + if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) { + fprintf(stderr, + "bpf prog error out at lport:passive(%d), linum:%u err:%d\n", + lport, linum_err.linum, linum_err.err); + nr_errors++; + } + + lport = sk_fds->active_lport; + if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) { + fprintf(stderr, + "bpf prog error out at lport:active(%d), linum:%u err:%d\n", + lport, linum_err.linum, linum_err.err); + nr_errors++; + } + + return nr_errors; +} + +static void check_hdr_and_close_fds(struct sk_fds *sk_fds) +{ + const __u32 
expected_inherit_cb_flags = + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG | + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG | + BPF_SOCK_OPS_STATE_CB_FLAG; + + if (sk_fds_shutdown(sk_fds)) + goto check_linum; + + if (!ASSERT_EQ(expected_inherit_cb_flags, skel->bss->inherit_cb_flags, + "inherit_cb_flags")) + goto check_linum; + + if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd, + "passive_hdr_stg")) + goto check_linum; + + if (check_hdr_stg(&exp_active_hdr_stg, sk_fds->active_fd, + "active_hdr_stg")) + goto check_linum; + + if (check_hdr_opt(&exp_passive_estab_in, &skel->bss->passive_estab_in, + "passive_estab_in")) + goto check_linum; + + if (check_hdr_opt(&exp_active_estab_in, &skel->bss->active_estab_in, + "active_estab_in")) + goto check_linum; + + if (check_hdr_opt(&exp_passive_fin_in, &skel->bss->passive_fin_in, + "passive_fin_in")) + goto check_linum; + + check_hdr_opt(&exp_active_fin_in, &skel->bss->active_fin_in, + "active_fin_in"); + +check_linum: + ASSERT_FALSE(check_error_linum(sk_fds), "check_error_linum"); + sk_fds_close(sk_fds); +} + +static void prepare_out(void) +{ + skel->bss->active_syn_out = exp_passive_estab_in; + skel->bss->passive_synack_out = exp_active_estab_in; + + skel->bss->active_fin_out = exp_passive_fin_in; + skel->bss->passive_fin_out = exp_active_fin_in; +} + +static void reset_test(void) +{ + size_t optsize = sizeof(struct bpf_test_option); + int lport, err; + + memset(&skel->bss->passive_synack_out, 0, optsize); + memset(&skel->bss->passive_fin_out, 0, optsize); + + memset(&skel->bss->passive_estab_in, 0, optsize); + memset(&skel->bss->passive_fin_in, 0, optsize); + + memset(&skel->bss->active_syn_out, 0, optsize); + memset(&skel->bss->active_fin_out, 0, optsize); + + memset(&skel->bss->active_estab_in, 0, optsize); + memset(&skel->bss->active_fin_in, 0, optsize); + + skel->bss->inherit_cb_flags = 0; + + skel->data->test_kind = TCPOPT_EXP; + skel->data->test_magic = 0xeB9F; + + memset(&exp_passive_estab_in, 0, optsize); + memset(&exp_active_estab_in, 0, optsize); + memset(&exp_passive_fin_in, 0, optsize); + memset(&exp_active_fin_in, 0, optsize); + + memset(&exp_passive_hdr_stg, 0, sizeof(exp_passive_hdr_stg)); + memset(&exp_active_hdr_stg, 0, sizeof(exp_active_hdr_stg)); + exp_active_hdr_stg.active = true; + + err = bpf_map_get_next_key(lport_linum_map_fd, NULL, &lport); + while (!err) { + bpf_map_delete_elem(lport_linum_map_fd, &lport); + err = bpf_map_get_next_key(lport_linum_map_fd, &lport, &lport); + } +} + +static void fastopen_estab(void) +{ + struct bpf_link *link; + struct sk_fds sk_fds; + + hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map); + lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map); + + exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS; + exp_passive_estab_in.rand = 0xfa; + exp_passive_estab_in.max_delack_ms = 11; + + exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS; + exp_active_estab_in.rand = 0xce; + exp_active_estab_in.max_delack_ms = 22; + + exp_passive_hdr_stg.fastopen = true; + + prepare_out(); + + /* Allow fastopen without fastopen cookie */ + if (write_sysctl("/proc/sys/net/ipv4/tcp_fastopen", "1543")) + return; + + link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)")) + return; + + if (sk_fds_connect(&sk_fds, true)) { + bpf_link__destroy(link); + return; + } + + check_hdr_and_close_fds(&sk_fds); + bpf_link__destroy(link); +} + +static void syncookie_estab(void) +{ + struct bpf_link *link; + struct sk_fds sk_fds; + + 
hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map); + lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map); + + exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS; + exp_passive_estab_in.rand = 0xfa; + exp_passive_estab_in.max_delack_ms = 11; + + exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS | + OPTION_F_RESEND; + exp_active_estab_in.rand = 0xce; + exp_active_estab_in.max_delack_ms = 22; + + exp_passive_hdr_stg.syncookie = true; + exp_active_hdr_stg.resend_syn = true, + + prepare_out(); + + /* Clear the RESEND to ensure the bpf prog can learn + * want_cookie and set the RESEND by itself. + */ + skel->bss->passive_synack_out.flags &= ~OPTION_F_RESEND; + + /* Enforce syncookie mode */ + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2")) + return; + + link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)")) + return; + + if (sk_fds_connect(&sk_fds, false)) { + bpf_link__destroy(link); + return; + } + + check_hdr_and_close_fds(&sk_fds); + bpf_link__destroy(link); +} + +static void fin(void) +{ + struct bpf_link *link; + struct sk_fds sk_fds; + + hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map); + lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map); + + exp_passive_fin_in.flags = OPTION_F_RAND; + exp_passive_fin_in.rand = 0xfa; + + exp_active_fin_in.flags = OPTION_F_RAND; + exp_active_fin_in.rand = 0xce; + + prepare_out(); + + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1")) + return; + + link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)")) + return; + + if (sk_fds_connect(&sk_fds, false)) { + bpf_link__destroy(link); + return; + } + + check_hdr_and_close_fds(&sk_fds); + bpf_link__destroy(link); +} + +static void __simple_estab(bool exprm) +{ + struct bpf_link *link; + struct sk_fds sk_fds; + + hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map); + lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map); + + exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS; + exp_passive_estab_in.rand = 0xfa; + exp_passive_estab_in.max_delack_ms = 11; + + exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS; + exp_active_estab_in.rand = 0xce; + exp_active_estab_in.max_delack_ms = 22; + + prepare_out(); + + if (!exprm) { + skel->data->test_kind = 0xB9; + skel->data->test_magic = 0; + } + + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1")) + return; + + link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(estab)")) + return; + + if (sk_fds_connect(&sk_fds, false)) { + bpf_link__destroy(link); + return; + } + + check_hdr_and_close_fds(&sk_fds); + bpf_link__destroy(link); +} + +static void no_exprm_estab(void) +{ + __simple_estab(false); +} + +static void simple_estab(void) +{ + __simple_estab(true); +} + +static void misc(void) +{ + const char send_msg[] = "MISC!!!"; + char recv_msg[sizeof(send_msg)]; + const unsigned int nr_data = 2; + struct bpf_link *link; + struct sk_fds sk_fds; + int i, ret; + + lport_linum_map_fd = bpf_map__fd(misc_skel->maps.lport_linum_map); + + if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1")) + return; + + link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd); + if (!ASSERT_OK_PTR(link, "attach_cgroup(misc_estab)")) + return; + + if (sk_fds_connect(&sk_fds, false)) { + bpf_link__destroy(link); + return; + } + + for (i = 0; i < nr_data; i++) { + /* MSG_EOR to ensure skb 
will not be combined */ + ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg), + MSG_EOR); + if (!ASSERT_EQ(ret, sizeof(send_msg), "send(msg)")) + goto check_linum; + + ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg)); + if (!ASSERT_EQ(ret, sizeof(send_msg), "read(msg)")) + goto check_linum; + } + + if (sk_fds_shutdown(&sk_fds)) + goto check_linum; + + ASSERT_EQ(misc_skel->bss->nr_syn, 1, "unexpected nr_syn"); + + ASSERT_EQ(misc_skel->bss->nr_data, nr_data, "unexpected nr_data"); + + /* The last ACK may have been delayed, so it is either 1 or 2. */ + CHECK(misc_skel->bss->nr_pure_ack != 1 && + misc_skel->bss->nr_pure_ack != 2, + "unexpected nr_pure_ack", + "expected (1 or 2) != actual (%u)\n", + misc_skel->bss->nr_pure_ack); + + ASSERT_EQ(misc_skel->bss->nr_fin, 1, "unexpected nr_fin"); + +check_linum: + ASSERT_FALSE(check_error_linum(&sk_fds), "check_error_linum"); + sk_fds_close(&sk_fds); + bpf_link__destroy(link); +} + +struct test { + const char *desc; + void (*run)(void); +}; + +#define DEF_TEST(name) { #name, name } +static struct test tests[] = { + DEF_TEST(simple_estab), + DEF_TEST(no_exprm_estab), + DEF_TEST(syncookie_estab), + DEF_TEST(fastopen_estab), + DEF_TEST(fin), + DEF_TEST(misc), +}; + +void test_tcp_hdr_options(void) +{ + int i; + + skel = test_tcp_hdr_options__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skel")) + return; + + misc_skel = test_misc_tcp_hdr_options__open_and_load(); + if (!ASSERT_OK_PTR(misc_skel, "open and load misc test skel")) + goto skel_destroy; + + cg_fd = test__join_cgroup(CG_NAME); + if (!ASSERT_GE(cg_fd, 0, "join_cgroup")) + goto skel_destroy; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!test__start_subtest(tests[i].desc)) + continue; + + if (create_netns()) + break; + + tests[i].run(); + + reset_test(); + } + + close(cg_fd); +skel_destroy: + test_misc_tcp_hdr_options__destroy(misc_skel); + test_tcp_hdr_options__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c new file mode 100644 index 000000000..8fe84da1b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "tcp_rtt.skel.h" + +struct tcp_rtt_storage { + __u32 invoked; + __u32 dsack_dups; + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +static void send_byte(int fd) +{ + char b = 0x55; + + ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte"); +} + +static int wait_for_ack(int fd, int retries) +{ + struct tcp_info info; + socklen_t optlen; + int i, err; + + for (i = 0; i < retries; i++) { + optlen = sizeof(info); + err = getsockopt(fd, SOL_TCP, TCP_INFO, &info, &optlen); + if (err < 0) { + log_err("Failed to lookup TCP stats"); + return err; + } + + if (info.tcpi_unacked == 0) + return 0; + + usleep(10); + } + + log_err("Did not receive ACK"); + return -1; +} + +static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, + __u32 dsack_dups, __u32 delivered, __u32 delivered_ce, + __u32 icsk_retransmits) +{ + int err = 0; + struct tcp_rtt_storage val; + + if (!ASSERT_GE(bpf_map_lookup_elem(map_fd, &client_fd, &val), 0, "read socket storage")) + return -1; + + if (val.invoked != invoked) { + log_err("%s: unexpected bpf_tcp_sock.invoked %d != %d", + msg, val.invoked, invoked); + err++; + } + + if (val.dsack_dups != dsack_dups) { + log_err("%s: unexpected 
bpf_tcp_sock.dsack_dups %d != %d", + msg, val.dsack_dups, dsack_dups); + err++; + } + + if (val.delivered != delivered) { + log_err("%s: unexpected bpf_tcp_sock.delivered %d != %d", + msg, val.delivered, delivered); + err++; + } + + if (val.delivered_ce != delivered_ce) { + log_err("%s: unexpected bpf_tcp_sock.delivered_ce %d != %d", + msg, val.delivered_ce, delivered_ce); + err++; + } + + if (val.icsk_retransmits != icsk_retransmits) { + log_err("%s: unexpected bpf_tcp_sock.icsk_retransmits %d != %d", + msg, val.icsk_retransmits, icsk_retransmits); + err++; + } + + return err; +} + + +static int run_test(int cgroup_fd, int server_fd) +{ + struct tcp_rtt *skel; + int client_fd; + int prog_fd; + int map_fd; + int err; + + skel = tcp_rtt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_load")) + return -1; + + map_fd = bpf_map__fd(skel->maps.socket_storage_map); + prog_fd = bpf_program__fd(skel->progs._sockops); + + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0); + if (err) { + log_err("Failed to attach BPF program"); + goto close_bpf_object; + } + + client_fd = connect_to_fd(server_fd, 0); + if (client_fd < 0) { + err = -1; + goto close_bpf_object; + } + + err += verify_sk(map_fd, client_fd, "syn-ack", + /*invoked=*/1, + /*dsack_dups=*/0, + /*delivered=*/1, + /*delivered_ce=*/0, + /*icsk_retransmits=*/0); + + send_byte(client_fd); + if (wait_for_ack(client_fd, 100) < 0) { + err = -1; + goto close_client_fd; + } + + + err += verify_sk(map_fd, client_fd, "first payload byte", + /*invoked=*/2, + /*dsack_dups=*/0, + /*delivered=*/2, + /*delivered_ce=*/0, + /*icsk_retransmits=*/0); + +close_client_fd: + close(client_fd); + +close_bpf_object: + tcp_rtt__destroy(skel); + return err; +} + +void test_tcp_rtt(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/tcp_rtt"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /tcp_rtt")) + return; + + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto close_cgroup_fd; + + ASSERT_OK(run_test(cgroup_fd, server_fd), "run_test"); + + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c new file mode 100644 index 000000000..7e8fe1bad --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_tcpbpf.h" +#include "test_tcpbpf_kern.skel.h" + +#define LO_ADDR6 "::1" +#define CG_NAME "/tcpbpf-user-test" + +static void verify_result(struct tcpbpf_globals *result) +{ + __u32 expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | + (1 << BPF_SOCK_OPS_RWND_INIT) | + (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) | + (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) | + (1 << BPF_SOCK_OPS_NEEDS_ECN) | + (1 << BPF_SOCK_OPS_STATE_CB) | + (1 << BPF_SOCK_OPS_TCP_LISTEN_CB)); + + /* check global map */ + ASSERT_EQ(expected_events, result->event_map, "event_map"); + + ASSERT_EQ(result->bytes_received, 501, "bytes_received"); + ASSERT_EQ(result->bytes_acked, 1002, "bytes_acked"); + ASSERT_EQ(result->data_segs_in, 1, "data_segs_in"); + ASSERT_EQ(result->data_segs_out, 1, "data_segs_out"); + ASSERT_EQ(result->bad_cb_test_rv, 0x80, "bad_cb_test_rv"); + ASSERT_EQ(result->good_cb_test_rv, 0, "good_cb_test_rv"); + ASSERT_EQ(result->num_listen, 1, "num_listen"); + + /* 3 comes from one 
listening socket + both ends of the connection */ + ASSERT_EQ(result->num_close_events, 3, "num_close_events"); + + /* check setsockopt for SAVE_SYN */ + ASSERT_EQ(result->tcp_save_syn, 0, "tcp_save_syn"); + + /* check getsockopt for SAVED_SYN */ + ASSERT_EQ(result->tcp_saved_syn, 1, "tcp_saved_syn"); + + /* check getsockopt for window_clamp */ + ASSERT_EQ(result->window_clamp_client, 9216, "window_clamp_client"); + ASSERT_EQ(result->window_clamp_server, 9216, "window_clamp_server"); +} + +static void run_test(struct tcpbpf_globals *result) +{ + int listen_fd = -1, cli_fd = -1, accept_fd = -1; + char buf[1000]; + int err = -1; + int i, rv; + + listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0); + if (!ASSERT_NEQ(listen_fd, -1, "start_server")) + goto done; + + cli_fd = connect_to_fd(listen_fd, 0); + if (!ASSERT_NEQ(cli_fd, -1, "connect_to_fd(listen_fd)")) + goto done; + + accept_fd = accept(listen_fd, NULL, NULL); + if (!ASSERT_NEQ(accept_fd, -1, "accept(listen_fd)")) + goto done; + + /* Send 1000B of '+'s from cli_fd -> accept_fd */ + for (i = 0; i < 1000; i++) + buf[i] = '+'; + + rv = send(cli_fd, buf, 1000, 0); + if (!ASSERT_EQ(rv, 1000, "send(cli_fd)")) + goto done; + + rv = recv(accept_fd, buf, 1000, 0); + if (!ASSERT_EQ(rv, 1000, "recv(accept_fd)")) + goto done; + + /* Send 500B of '.'s from accept_fd ->cli_fd */ + for (i = 0; i < 500; i++) + buf[i] = '.'; + + rv = send(accept_fd, buf, 500, 0); + if (!ASSERT_EQ(rv, 500, "send(accept_fd)")) + goto done; + + rv = recv(cli_fd, buf, 500, 0); + if (!ASSERT_EQ(rv, 500, "recv(cli_fd)")) + goto done; + + /* + * shutdown accept first to guarantee correct ordering for + * bytes_received and bytes_acked when we go to verify the results. + */ + shutdown(accept_fd, SHUT_WR); + err = recv(cli_fd, buf, 1, 0); + if (!ASSERT_OK(err, "recv(cli_fd) for fin")) + goto done; + + shutdown(cli_fd, SHUT_WR); + err = recv(accept_fd, buf, 1, 0); + ASSERT_OK(err, "recv(accept_fd) for fin"); +done: + if (accept_fd != -1) + close(accept_fd); + if (cli_fd != -1) + close(cli_fd); + if (listen_fd != -1) + close(listen_fd); + + if (!err) + verify_result(result); +} + +void test_tcpbpf_user(void) +{ + struct test_tcpbpf_kern *skel; + int cg_fd = -1; + + skel = test_tcpbpf_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skel")) + return; + + cg_fd = test__join_cgroup(CG_NAME); + if (!ASSERT_GE(cg_fd, 0, "test__join_cgroup(" CG_NAME ")")) + goto err; + + skel->links.bpf_testcb = bpf_program__attach_cgroup(skel->progs.bpf_testcb, cg_fd); + if (!ASSERT_OK_PTR(skel->links.bpf_testcb, "attach_cgroup(bpf_testcb)")) + goto err; + + run_test(&skel->bss->global); + +err: + if (cg_fd != -1) + close(cg_fd); + test_tcpbpf_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c new file mode 100644 index 000000000..c381faaae --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 2022 Sony Group Corporation */ +#include <sys/prctl.h> +#include <test_progs.h> +#include "bpf_syscall_macro.skel.h" + +void test_bpf_syscall_macro(void) +{ + struct bpf_syscall_macro *skel = NULL; + int err; + int exp_arg1 = 1001; + unsigned long exp_arg2 = 12; + unsigned long exp_arg3 = 13; + unsigned long exp_arg4 = 14; + unsigned long exp_arg5 = 15; + + /* check whether it can open program */ + skel = bpf_syscall_macro__open(); + if (!ASSERT_OK_PTR(skel, 
"bpf_syscall_macro__open")) + return; + + skel->rodata->filter_pid = getpid(); + + /* check whether it can load program */ + err = bpf_syscall_macro__load(skel); + if (!ASSERT_OK(err, "bpf_syscall_macro__load")) + goto cleanup; + + /* check whether it can attach kprobe */ + err = bpf_syscall_macro__attach(skel); + if (!ASSERT_OK(err, "bpf_syscall_macro__attach")) + goto cleanup; + + /* check whether args of syscall are copied correctly */ + prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5); +#if defined(__aarch64__) || defined(__s390__) + ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); +#else + ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); +#endif + ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2"); + ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3"); + /* it cannot copy arg4 when uses PT_REGS_PARM4 on x86_64 */ +#ifdef __x86_64__ + ASSERT_NEQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx"); +#else + ASSERT_EQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx"); +#endif + ASSERT_EQ(skel->bss->arg4, exp_arg4, "syscall_arg4"); + ASSERT_EQ(skel->bss->arg5, exp_arg5, "syscall_arg5"); + + /* check whether args of syscall are copied correctly for CORE variants */ + ASSERT_EQ(skel->bss->arg1_core, exp_arg1, "syscall_arg1_core_variant"); + ASSERT_EQ(skel->bss->arg2_core, exp_arg2, "syscall_arg2_core_variant"); + ASSERT_EQ(skel->bss->arg3_core, exp_arg3, "syscall_arg3_core_variant"); + /* it cannot copy arg4 when uses PT_REGS_PARM4_CORE on x86_64 */ +#ifdef __x86_64__ + ASSERT_NEQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant"); +#else + ASSERT_EQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant"); +#endif + ASSERT_EQ(skel->bss->arg4_core, exp_arg4, "syscall_arg4_core_variant"); + ASSERT_EQ(skel->bss->arg5_core, exp_arg5, "syscall_arg5_core_variant"); + + ASSERT_EQ(skel->bss->option_syscall, exp_arg1, "BPF_KPROBE_SYSCALL_option"); + ASSERT_EQ(skel->bss->arg2_syscall, exp_arg2, "BPF_KPROBE_SYSCALL_arg2"); + ASSERT_EQ(skel->bss->arg3_syscall, exp_arg3, "BPF_KPROBE_SYSCALL_arg3"); + ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4"); + ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5"); + +cleanup: + bpf_syscall_macro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c new file mode 100644 index 000000000..214d9f4a9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#define _GNU_SOURCE +#include <stdio.h> +#include <sched.h> +#include <sys/mount.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <test_progs.h> + +#define TDIR "/sys/kernel/debug" + +static int read_iter(char *file) +{ + /* 1024 should be enough to get contiguous 4 "iter" letters at some point */ + char buf[1024]; + int fd, len; + + fd = open(file, 0); + if (fd < 0) + return -1; + while ((len = read(fd, buf, sizeof(buf))) > 0) { + buf[sizeof(buf) - 1] = '\0'; + if (strstr(buf, "iter")) { + close(fd); + return 0; + } + } + close(fd); + return -1; +} + +static int fn(void) +{ + struct stat a, b, c; + int err, map; + + err = unshare(CLONE_NEWNS); + if (!ASSERT_OK(err, "unshare")) + goto out; + + err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL); + if (!ASSERT_OK(err, "mount /")) + goto out; + + err = umount(TDIR); + if (!ASSERT_OK(err, "umount " TDIR)) + goto out; + + err = mount("none", TDIR, "tmpfs", 0, NULL); + 
if (!ASSERT_OK(err, "mount tmpfs")) + goto out; + + err = mkdir(TDIR "/fs1", 0777); + if (!ASSERT_OK(err, "mkdir " TDIR "/fs1")) + goto out; + err = mkdir(TDIR "/fs2", 0777); + if (!ASSERT_OK(err, "mkdir " TDIR "/fs2")) + goto out; + + err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL); + if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs1")) + goto out; + err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL); + if (!ASSERT_OK(err, "mount bpffs " TDIR "/fs2")) + goto out; + + err = read_iter(TDIR "/fs1/maps.debug"); + if (!ASSERT_OK(err, "reading " TDIR "/fs1/maps.debug")) + goto out; + err = read_iter(TDIR "/fs2/progs.debug"); + if (!ASSERT_OK(err, "reading " TDIR "/fs2/progs.debug")) + goto out; + + err = mkdir(TDIR "/fs1/a", 0777); + if (!ASSERT_OK(err, "creating " TDIR "/fs1/a")) + goto out; + err = mkdir(TDIR "/fs1/a/1", 0777); + if (!ASSERT_OK(err, "creating " TDIR "/fs1/a/1")) + goto out; + err = mkdir(TDIR "/fs1/b", 0777); + if (!ASSERT_OK(err, "creating " TDIR "/fs1/b")) + goto out; + + map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); + if (!ASSERT_GT(map, 0, "create_map(ARRAY)")) + goto out; + err = bpf_obj_pin(map, TDIR "/fs1/c"); + if (!ASSERT_OK(err, "pin map")) + goto out; + close(map); + + /* Check that RENAME_EXCHANGE works for directories. */ + err = stat(TDIR "/fs1/a", &a); + if (!ASSERT_OK(err, "stat(" TDIR "/fs1/a)")) + goto out; + err = renameat2(0, TDIR "/fs1/a", 0, TDIR "/fs1/b", RENAME_EXCHANGE); + if (!ASSERT_OK(err, "renameat2(/fs1/a, /fs1/b, RENAME_EXCHANGE)")) + goto out; + err = stat(TDIR "/fs1/b", &b); + if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)")) + goto out; + if (!ASSERT_EQ(a.st_ino, b.st_ino, "b should have a's inode")) + goto out; + err = access(TDIR "/fs1/b/1", F_OK); + if (!ASSERT_OK(err, "access(" TDIR "/fs1/b/1)")) + goto out; + + /* Check that RENAME_EXCHANGE works for mixed file types. */ + err = stat(TDIR "/fs1/c", &c); + if (!ASSERT_OK(err, "stat(" TDIR "/fs1/map)")) + goto out; + err = renameat2(0, TDIR "/fs1/c", 0, TDIR "/fs1/b", RENAME_EXCHANGE); + if (!ASSERT_OK(err, "renameat2(/fs1/c, /fs1/b, RENAME_EXCHANGE)")) + goto out; + err = stat(TDIR "/fs1/b", &b); + if (!ASSERT_OK(err, "stat(" TDIR "/fs1/b)")) + goto out; + if (!ASSERT_EQ(c.st_ino, b.st_ino, "b should have c's inode")) + goto out; + err = access(TDIR "/fs1/c/1", F_OK); + if (!ASSERT_OK(err, "access(" TDIR "/fs1/c/1)")) + goto out; + + /* Check that RENAME_NOREPLACE works. 
*/ + err = renameat2(0, TDIR "/fs1/b", 0, TDIR "/fs1/a", RENAME_NOREPLACE); + if (!ASSERT_ERR(err, "renameat2(RENAME_NOREPLACE)")) { + err = -EINVAL; + goto out; + } + err = access(TDIR "/fs1/b", F_OK); + if (!ASSERT_OK(err, "access(" TDIR "/fs1/b)")) + goto out; + +out: + umount(TDIR "/fs1"); + umount(TDIR "/fs2"); + rmdir(TDIR "/fs1"); + rmdir(TDIR "/fs2"); + umount(TDIR); + exit(err); +} + +void test_test_bpffs(void) +{ + int err, duration = 0, status = 0; + pid_t pid; + + pid = fork(); + if (CHECK(pid == -1, "clone", "clone failed %d", errno)) + return; + if (pid == 0) + fn(); + err = waitpid(pid, &status, 0); + if (CHECK(err == -1 && errno != ECHILD, "waitpid", "failed %d", errno)) + return; + if (CHECK(WEXITSTATUS(status), "bpffs test ", "failed %d", WEXITSTATUS(status))) + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c new file mode 100644 index 000000000..a0054019e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ + +#include <test_progs.h> +#include <linux/limits.h> + +#include "bprm_opts.skel.h" +#include "network_helpers.h" +#include "task_local_storage_helpers.h" + +static const char * const bash_envp[] = { "TMPDIR=shouldnotbeset", NULL }; + +static int update_storage(int map_fd, int secureexec) +{ + int task_fd, ret = 0; + + task_fd = sys_pidfd_open(getpid(), 0); + if (task_fd < 0) + return errno; + + ret = bpf_map_update_elem(map_fd, &task_fd, &secureexec, BPF_NOEXIST); + if (ret) + ret = errno; + + close(task_fd); + return ret; +} + +static int run_set_secureexec(int map_fd, int secureexec) +{ + int child_pid, child_status, ret, null_fd; + + child_pid = fork(); + if (child_pid == 0) { + null_fd = open("/dev/null", O_WRONLY); + if (null_fd == -1) + exit(errno); + dup2(null_fd, STDOUT_FILENO); + dup2(null_fd, STDERR_FILENO); + close(null_fd); + + /* Ensure that all executions from hereon are + * secure by setting a local storage which is read by + * the bprm_creds_for_exec hook and sets bprm->secureexec. + */ + ret = update_storage(map_fd, secureexec); + if (ret) + exit(ret); + + /* If the binary is executed with securexec=1, the dynamic + * loader ingores and unsets certain variables like LD_PRELOAD, + * TMPDIR etc. TMPDIR is used here to simplify the example, as + * LD_PRELOAD requires a real .so file. + * + * If the value of TMPDIR is set, the bash command returns 10 + * and if the value is unset, it returns 20. 
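+ * In other words, a secure execution is expected to exit with 20
+ * (the loader stripped TMPDIR from bash_envp), while a normal
+ * execution is expected to exit with 10 (TMPDIR is still set);
+ * the parent maps these exit codes back to pass/fail below.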
+ */ + execle("/bin/bash", "bash", "-c", + "[[ -z \"${TMPDIR}\" ]] || exit 10 && exit 20", NULL, + bash_envp); + exit(errno); + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + ret = WEXITSTATUS(child_status); + + /* If a secureexec occurred, the exit status should be 20 */ + if (secureexec && ret == 20) + return 0; + + /* If normal execution happened, the exit code should be 10 */ + if (!secureexec && ret == 10) + return 0; + } + + return -EINVAL; +} + +void test_test_bprm_opts(void) +{ + int err, duration = 0; + struct bprm_opts *skel = NULL; + + skel = bprm_opts__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton failed\n")) + goto close_prog; + + err = bprm_opts__attach(skel); + if (CHECK(err, "attach", "attach failed: %d\n", err)) + goto close_prog; + + /* Run the test with the secureexec bit unset */ + err = run_set_secureexec(bpf_map__fd(skel->maps.secure_exec_task_map), + 0 /* secureexec */); + if (CHECK(err, "run_set_secureexec:0", "err = %d\n", err)) + goto close_prog; + + /* Run the test with the secureexec bit set */ + err = run_set_secureexec(bpf_map__fd(skel->maps.secure_exec_task_map), + 1 /* secureexec */); + if (CHECK(err, "run_set_secureexec:1", "err = %d\n", err)) + goto close_prog; + +close_prog: + bprm_opts__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c new file mode 100644 index 000000000..7295cc60f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> + +const char *err_str; +bool found; + +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + char *log_buf; + + if (level != LIBBPF_WARN || + strcmp(format, "libbpf: \n%s\n")) { + vprintf(format, args); + return 0; + } + + log_buf = va_arg(args, char *); + if (!log_buf) + goto out; + if (err_str && strstr(log_buf, err_str) == 0) + found = true; +out: + printf(format, log_buf); + return 0; +} + +extern int extra_prog_load_log_flags; + +static int check_load(const char *file) +{ + struct bpf_object *obj = NULL; + struct bpf_program *prog; + int err; + + found = false; + + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (err) + return err; + + prog = bpf_object__next_program(obj, NULL); + if (!prog) { + err = -ENOENT; + goto err_out; + } + + bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32); + bpf_program__set_log_level(prog, extra_prog_load_log_flags); + + err = bpf_object__load(obj); + +err_out: + bpf_object__close(obj); + return err; +} + +struct test_def { + const char *file; + const char *err_str; +}; + +void test_test_global_funcs(void) +{ + struct test_def tests[] = { + { "test_global_func1.bpf.o", "combined stack size of 4 calls is 544" }, + { "test_global_func2.bpf.o" }, + { "test_global_func3.bpf.o", "the call stack of 8 frames" }, + { "test_global_func4.bpf.o" }, + { "test_global_func5.bpf.o", "expected pointer to ctx, but got PTR" }, + { "test_global_func6.bpf.o", "modified ctx ptr R2" }, + { "test_global_func7.bpf.o", "foo() doesn't return scalar" }, + { "test_global_func8.bpf.o" }, + { "test_global_func9.bpf.o" }, + { "test_global_func10.bpf.o", "invalid indirect read from stack" }, + { "test_global_func11.bpf.o", "Caller passes invalid args into func#1" }, + { "test_global_func12.bpf.o", "invalid mem access 'mem_or_null'" }, + { "test_global_func13.bpf.o", 
"Caller passes invalid args into func#1" }, + { "test_global_func14.bpf.o", "reference type('FWD S') size cannot be determined" }, + { "test_global_func15.bpf.o", "At program exit the register R0 has value" }, + { "test_global_func16.bpf.o", "invalid indirect read from stack" }, + { "test_global_func17.bpf.o", "Caller passes invalid args into func#1" }, + }; + libbpf_print_fn_t old_print_fn = NULL; + int err, i, duration = 0; + + old_print_fn = libbpf_set_print(libbpf_debug_print); + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + const struct test_def *test = &tests[i]; + + if (!test__start_subtest(test->file)) + continue; + + err_str = test->err_str; + err = check_load(test->file); + CHECK_FAIL(!!err ^ !!err_str); + if (err_str) + CHECK(found, "", "expected string '%s'", err_str); + } + libbpf_set_print(old_print_fn); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c new file mode 100644 index 000000000..b13feceb3 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/wait.h> +#include <test_progs.h> +#include <linux/ring_buffer.h> + +#include "ima.skel.h" + +#define MAX_SAMPLES 4 + +static int _run_measured_process(const char *measured_dir, u32 *monitored_pid, + const char *cmd) +{ + int child_pid, child_status; + + child_pid = fork(); + if (child_pid == 0) { + *monitored_pid = getpid(); + execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir, + NULL); + exit(errno); + + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return WEXITSTATUS(child_status); + } + + return -EINVAL; +} + +static int run_measured_process(const char *measured_dir, u32 *monitored_pid) +{ + return _run_measured_process(measured_dir, monitored_pid, "run"); +} + +static u64 ima_hash_from_bpf[MAX_SAMPLES]; +static int ima_hash_from_bpf_idx; + +static int process_sample(void *ctx, void *data, size_t len) +{ + if (ima_hash_from_bpf_idx >= MAX_SAMPLES) + return -ENOSPC; + + ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data); + return 0; +} + +static void test_init(struct ima__bss *bss) +{ + ima_hash_from_bpf_idx = 0; + + bss->use_ima_file_hash = false; + bss->enable_bprm_creds_for_exec = false; + bss->enable_kernel_read_file = false; + bss->test_deny = false; +} + +void test_test_ima(void) +{ + char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; + struct ring_buffer *ringbuf = NULL; + const char *measured_dir; + u64 bin_true_sample; + char cmd[256]; + + int err, duration = 0; + struct ima *skel = NULL; + + skel = ima__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton failed\n")) + goto close_prog; + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), + process_sample, NULL, NULL); + if (!ASSERT_OK_PTR(ringbuf, "ringbuf")) + goto close_prog; + + err = ima__attach(skel); + if (CHECK(err, "attach", "attach failed: %d\n", err)) + goto close_prog; + + measured_dir = mkdtemp(measured_dir_template); + if (CHECK(measured_dir == NULL, "mkdtemp", "err %d\n", errno)) + goto close_prog; + + snprintf(cmd, sizeof(cmd), "./ima_setup.sh setup %s", measured_dir); + err = system(cmd); + if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno)) + goto close_clean; + + /* + * Test #1 + * - Goal: obtain a sample with the bpf_ima_inode_hash() helper + * - Expected result: 1 sample (/bin/true) + */ + 
test_init(skel->bss); + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #1", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 1, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + + /* + * Test #2 + * - Goal: obtain samples with the bpf_ima_file_hash() helper + * - Expected result: 2 samples (./ima_setup.sh, /bin/true) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #2", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + bin_true_sample = ima_hash_from_bpf[1]; + + /* + * Test #3 + * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest + * - Expected result: 2 samples (/bin/true: non-fresh, fresh) + */ + test_init(skel->bss); + + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "modify-bin"); + if (CHECK(err, "modify-bin #3", "err = %d\n", err)) + goto close_clean; + + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #3", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err"); + /* IMA refreshed the digest. */ + ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample, + "sample_different_or_err"); + + /* + * Test #4 + * - Goal: verify that bpf_ima_file_hash() returns a fresh digest + * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh; + * /bin/true: fresh, fresh) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #4", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 4, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample, + "sample_different_or_err"); + ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2], + "sample_equal_or_err"); + + skel->bss->use_ima_file_hash = false; + skel->bss->enable_bprm_creds_for_exec = false; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "restore-bin"); + if (CHECK(err, "restore-bin #3", "err = %d\n", err)) + goto close_clean; + + /* + * Test #5 + * - Goal: obtain a sample from the kernel_read_file hook + * - Expected result: 2 samples (./ima_setup.sh, policy_test) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_kernel_read_file = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(err, "run_measured_process #5", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); 
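+ /* The second sample is presumably the digest of the loaded test
+  * policy (the second entry in the expected-result list above), so
+  * it must be non-zero as well.
+  */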
+ ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + + /* + * Test #6 + * - Goal: ensure that the kernel_read_file hook denies an operation + * - Expected result: 0 samples + */ + test_init(skel->bss); + skel->bss->enable_kernel_read_file = true; + skel->bss->test_deny = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(!err, "run_measured_process #6", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 0, "num_samples_or_err"); + +close_clean: + snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); + err = system(cmd); + CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno); +close_prog: + ring_buffer__free(ringbuf); + ima__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c new file mode 100644 index 000000000..9c77cd6b1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ + +#include <asm-generic/errno-base.h> +#include <sys/stat.h> +#include <test_progs.h> +#include <linux/limits.h> + +#include "local_storage.skel.h" +#include "network_helpers.h" +#include "task_local_storage_helpers.h" + +static unsigned int duration; + +#define TEST_STORAGE_VALUE 0xbeefdead + +struct storage { + void *inode; + unsigned int value; +}; + +/* Fork and exec the provided rm binary and return the exit code of the + * forked process and its pid. + */ +static int run_self_unlink(int *monitored_pid, const char *rm_path) +{ + int child_pid, child_status, ret; + int null_fd; + + child_pid = fork(); + if (child_pid == 0) { + null_fd = open("/dev/null", O_WRONLY); + dup2(null_fd, STDOUT_FILENO); + dup2(null_fd, STDERR_FILENO); + close(null_fd); + + *monitored_pid = getpid(); + /* Use the copied /usr/bin/rm to delete itself + * /tmp/copy_of_rm /tmp/copy_of_rm. 
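+ * (argv[0] and argv[1] both point at rm_path, so the copied rm
+ * binary ends up unlinking its own executable.)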
+ */ + ret = execlp(rm_path, rm_path, rm_path, NULL); + if (ret) + exit(errno); + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return WEXITSTATUS(child_status); + } + + return -EINVAL; +} + +static bool check_syscall_operations(int map_fd, int obj_fd) +{ + struct storage val = { .value = TEST_STORAGE_VALUE }, + lookup_val = { .value = 0 }; + int err; + + /* Looking up an existing element should fail initially */ + err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); + if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", + "err:%d errno:%d\n", err, errno)) + return false; + + /* Create a new element */ + err = bpf_map_update_elem(map_fd, &obj_fd, &val, BPF_NOEXIST); + if (CHECK(err < 0, "bpf_map_update_elem", "err:%d errno:%d\n", err, + errno)) + return false; + + /* Lookup the newly created element */ + err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); + if (CHECK(err < 0, "bpf_map_lookup_elem", "err:%d errno:%d", err, + errno)) + return false; + + /* Check the value of the newly created element */ + if (CHECK(lookup_val.value != val.value, "bpf_map_lookup_elem", + "value got = %x errno:%d", lookup_val.value, val.value)) + return false; + + err = bpf_map_delete_elem(map_fd, &obj_fd); + if (CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n", err, + errno)) + return false; + + /* The lookup should fail, now that the element has been deleted */ + err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); + if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", + "err:%d errno:%d\n", err, errno)) + return false; + + return true; +} + +void test_test_local_storage(void) +{ + char tmp_dir_path[] = "/tmp/local_storageXXXXXX"; + int err, serv_sk = -1, task_fd = -1, rm_fd = -1; + struct local_storage *skel = NULL; + char tmp_exec_path[64]; + char cmd[256]; + + skel = local_storage__open_and_load(); + if (CHECK(!skel, "skel_load", "lsm skeleton failed\n")) + goto close_prog; + + err = local_storage__attach(skel); + if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) + goto close_prog; + + task_fd = sys_pidfd_open(getpid(), 0); + if (CHECK(task_fd < 0, "pidfd_open", + "failed to get pidfd err:%d, errno:%d", task_fd, errno)) + goto close_prog; + + if (!check_syscall_operations(bpf_map__fd(skel->maps.task_storage_map), + task_fd)) + goto close_prog; + + if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp", + "unable to create tmpdir: %d\n", errno)) + goto close_prog; + + snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm", + tmp_dir_path); + snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path); + if (CHECK_FAIL(system(cmd))) + goto close_prog_rmdir; + + rm_fd = open(tmp_exec_path, O_RDONLY); + if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d", + tmp_exec_path, rm_fd, errno)) + goto close_prog_rmdir; + + if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map), + rm_fd)) + goto close_prog_rmdir; + + /* Sets skel->bss->monitored_pid to the pid of the forked child + * forks a child process that executes tmp_exec_path and tries to + * unlink its executable. This operation should be denied by the loaded + * LSM program. 
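+ * run_self_unlink() reports the child's exit status, which the
+ * check below expects to be EPERM.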
+ */ + err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path); + if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err)) + goto close_prog_rmdir; + + /* Set the process being monitored to be the current process */ + skel->bss->monitored_pid = getpid(); + + /* Move copy_of_rm to a new location so that it triggers the + * inode_rename LSM hook with a new_dentry that has a NULL inode ptr. + */ + snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr", + tmp_dir_path, tmp_dir_path); + if (CHECK_FAIL(system(cmd))) + goto close_prog_rmdir; + + CHECK(skel->data->inode_storage_result != 0, "inode_storage_result", + "inode_local_storage not set\n"); + + serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) + goto close_prog_rmdir; + + CHECK(skel->data->sk_storage_result != 0, "sk_storage_result", + "sk_local_storage not set\n"); + + if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map), + serv_sk)) + goto close_prog_rmdir; + +close_prog_rmdir: + snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path); + system(cmd); +close_prog: + close(serv_sk); + close(rm_fd); + close(task_fd); + local_storage__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c new file mode 100644 index 000000000..244c01125 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2020 Google LLC. + */ + +#include <test_progs.h> +#include <sys/mman.h> +#include <sys/wait.h> +#include <unistd.h> +#include <malloc.h> +#include <stdlib.h> + +#include "lsm.skel.h" + +char *CMD_ARGS[] = {"true", NULL}; + +#define GET_PAGE_ADDR(ADDR, PAGE_SIZE) \ + (char *)(((unsigned long) (ADDR + PAGE_SIZE)) & ~(PAGE_SIZE-1)) + +int stack_mprotect(void) +{ + void *buf; + long sz; + int ret; + + sz = sysconf(_SC_PAGESIZE); + if (sz < 0) + return sz; + + buf = alloca(sz * 3); + ret = mprotect(GET_PAGE_ADDR(buf, sz), sz, + PROT_READ | PROT_WRITE | PROT_EXEC); + return ret; +} + +int exec_cmd(int *monitored_pid) +{ + int child_pid, child_status; + + child_pid = fork(); + if (child_pid == 0) { + *monitored_pid = getpid(); + execvp(CMD_ARGS[0], CMD_ARGS); + return -EINVAL; + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return child_status; + } + + return -EINVAL; +} + +static int test_lsm(struct lsm *skel) +{ + struct bpf_link *link; + int buf = 1234; + int err; + + err = lsm__attach(skel); + if (!ASSERT_OK(err, "attach")) + return err; + + /* Check that already linked program can't be attached again. 
*/ + link = bpf_program__attach(skel->progs.test_int_hook); + if (!ASSERT_ERR_PTR(link, "attach_link")) + return -1; + + err = exec_cmd(&skel->bss->monitored_pid); + if (!ASSERT_OK(err, "exec_cmd")) + return err; + + ASSERT_EQ(skel->bss->bprm_count, 1, "bprm_count"); + + skel->bss->monitored_pid = getpid(); + + err = stack_mprotect(); + if (!ASSERT_EQ(errno, EPERM, "stack_mprotect")) + return err; + + ASSERT_EQ(skel->bss->mprotect_count, 1, "mprotect_count"); + + syscall(__NR_setdomainname, &buf, -2L); + syscall(__NR_setdomainname, 0, -3L); + syscall(__NR_setdomainname, ~0L, -4L); + + ASSERT_EQ(skel->bss->copy_test, 3, "copy_test"); + + lsm__detach(skel); + + skel->bss->copy_test = 0; + skel->bss->bprm_count = 0; + skel->bss->mprotect_count = 0; + return 0; +} + +void test_test_lsm(void) +{ + struct lsm *skel = NULL; + int err; + + skel = lsm__open_and_load(); + if (!ASSERT_OK_PTR(skel, "lsm_skel_load")) + goto close_prog; + + err = test_lsm(skel); + if (!ASSERT_OK(err, "test_lsm_first_attach")) + goto close_prog; + + err = test_lsm(skel); + ASSERT_OK(err, "test_lsm_second_attach"); + +close_prog: + lsm__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c new file mode 100644 index 000000000..f27013e38 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#define _GNU_SOURCE +#include <sched.h> +#include <sys/prctl.h> +#include <test_progs.h> + +#define MAX_CNT 100000 + +static __u64 time_get_ns(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000ull + ts.tv_nsec; +} + +static int test_task_rename(const char *prog) +{ + int i, fd, duration = 0, err; + char buf[] = "test_overhead"; + __u64 start_time; + + fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); + if (CHECK(fd < 0, "open /proc", "err %d", errno)) + return -1; + start_time = time_get_ns(); + for (i = 0; i < MAX_CNT; i++) { + err = write(fd, buf, sizeof(buf)); + if (err < 0) { + CHECK(err < 0, "task rename", "err %d", errno); + close(fd); + return -1; + } + } + printf("task_rename %s\t%lluK events per sec\n", prog, + MAX_CNT * 1000000ll / (time_get_ns() - start_time)); + close(fd); + return 0; +} + +static void test_run(const char *prog) +{ + test_task_rename(prog); +} + +static void setaffinity(void) +{ + cpu_set_t cpuset; + int cpu = 0; + + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + sched_setaffinity(0, sizeof(cpuset), &cpuset); +} + +void test_test_overhead(void) +{ + const char *kprobe_name = "prog1"; + const char *kretprobe_name = "prog2"; + const char *raw_tp_name = "prog3"; + const char *fentry_name = "prog4"; + const char *fexit_name = "prog5"; + const char *kprobe_func = "__set_task_comm"; + struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog; + struct bpf_program *fentry_prog, *fexit_prog; + struct bpf_object *obj; + struct bpf_link *link; + int err, duration = 0; + char comm[16] = {}; + + if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) + return; + + obj = bpf_object__open_file("./test_overhead.bpf.o", NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + return; + + kprobe_prog = bpf_object__find_program_by_name(obj, kprobe_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", kprobe_name)) + goto cleanup; + kretprobe_prog = bpf_object__find_program_by_name(obj, kretprobe_name); + if (CHECK(!kretprobe_prog, "find_probe", + "prog 
'%s' not found\n", kretprobe_name)) + goto cleanup; + raw_tp_prog = bpf_object__find_program_by_name(obj, raw_tp_name); + if (CHECK(!raw_tp_prog, "find_probe", + "prog '%s' not found\n", raw_tp_name)) + goto cleanup; + fentry_prog = bpf_object__find_program_by_name(obj, fentry_name); + if (CHECK(!fentry_prog, "find_probe", + "prog '%s' not found\n", fentry_name)) + goto cleanup; + fexit_prog = bpf_object__find_program_by_name(obj, fexit_name); + if (CHECK(!fexit_prog, "find_probe", + "prog '%s' not found\n", fexit_name)) + goto cleanup; + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + setaffinity(); + + /* base line run */ + test_run("base"); + + /* attach kprobe */ + link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */, + kprobe_func); + if (!ASSERT_OK_PTR(link, "attach_kprobe")) + goto cleanup; + test_run("kprobe"); + bpf_link__destroy(link); + + /* attach kretprobe */ + link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */, + kprobe_func); + if (!ASSERT_OK_PTR(link, "attach_kretprobe")) + goto cleanup; + test_run("kretprobe"); + bpf_link__destroy(link); + + /* attach raw_tp */ + link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename"); + if (!ASSERT_OK_PTR(link, "attach_raw_tp")) + goto cleanup; + test_run("raw_tp"); + bpf_link__destroy(link); + + /* attach fentry */ + link = bpf_program__attach_trace(fentry_prog); + if (!ASSERT_OK_PTR(link, "attach_fentry")) + goto cleanup; + test_run("fentry"); + bpf_link__destroy(link); + + /* attach fexit */ + link = bpf_program__attach_trace(fexit_prog); + if (!ASSERT_OK_PTR(link, "attach_fexit")) + goto cleanup; + test_run("fexit"); + bpf_link__destroy(link); + +cleanup: + prctl(PR_SET_NAME, comm, 0L, 0L, 0L); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_profiler.c b/tools/testing/selftests/bpf/prog_tests/test_profiler.c new file mode 100644 index 000000000..de24e8f0e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_profiler.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include "progs/profiler.h" +#include "profiler1.skel.h" +#include "profiler2.skel.h" +#include "profiler3.skel.h" + +static int sanity_run(struct bpf_program *prog) +{ + LIBBPF_OPTS(bpf_test_run_opts, test_attr); + __u64 args[] = {1, 2, 3}; + int err, prog_fd; + + prog_fd = bpf_program__fd(prog); + test_attr.ctx_in = args; + test_attr.ctx_size_in = sizeof(args); + err = bpf_prog_test_run_opts(prog_fd, &test_attr); + if (!ASSERT_OK(err, "test_run")) + return -1; + + if (!ASSERT_OK(test_attr.retval, "test_run retval")) + return -1; + + return 0; +} + +void test_test_profiler(void) +{ + struct profiler1 *profiler1_skel = NULL; + struct profiler2 *profiler2_skel = NULL; + struct profiler3 *profiler3_skel = NULL; + __u32 duration = 0; + int err; + + profiler1_skel = profiler1__open_and_load(); + if (CHECK(!profiler1_skel, "profiler1_skel_load", "profiler1 skeleton failed\n")) + goto cleanup; + + err = profiler1__attach(profiler1_skel); + if (CHECK(err, "profiler1_attach", "profiler1 attach failed: %d\n", err)) + goto cleanup; + + if (sanity_run(profiler1_skel->progs.raw_tracepoint__sched_process_exec)) + goto cleanup; + + profiler2_skel = profiler2__open_and_load(); + if (CHECK(!profiler2_skel, "profiler2_skel_load", "profiler2 skeleton failed\n")) + goto cleanup; + + err = profiler2__attach(profiler2_skel); + if (CHECK(err, "profiler2_attach", "profiler2 attach failed: 
%d\n", err)) + goto cleanup; + + if (sanity_run(profiler2_skel->progs.raw_tracepoint__sched_process_exec)) + goto cleanup; + + profiler3_skel = profiler3__open_and_load(); + if (CHECK(!profiler3_skel, "profiler3_skel_load", "profiler3 skeleton failed\n")) + goto cleanup; + + err = profiler3__attach(profiler3_skel); + if (CHECK(err, "profiler3_attach", "profiler3 attach failed: %d\n", err)) + goto cleanup; + + if (sanity_run(profiler3_skel->progs.raw_tracepoint__sched_process_exec)) + goto cleanup; +cleanup: + profiler1__destroy(profiler1_skel); + profiler2__destroy(profiler2_skel); + profiler3__destroy(profiler3_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c new file mode 100644 index 000000000..ae93411fd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <test_progs.h> +#include <network_helpers.h> +#include "skb_pkt_end.skel.h" + +static int sanity_run(struct bpf_program *prog) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + prog_fd = bpf_program__fd(prog); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run")) + return -1; + if (!ASSERT_EQ(topts.retval, 123, "test_run retval")) + return -1; + return 0; +} + +void test_test_skb_pkt_end(void) +{ + struct skb_pkt_end *skb_pkt_end_skel = NULL; + __u32 duration = 0; + int err; + + skb_pkt_end_skel = skb_pkt_end__open_and_load(); + if (CHECK(!skb_pkt_end_skel, "skb_pkt_end_skel_load", "skb_pkt_end skeleton failed\n")) + goto cleanup; + + err = skb_pkt_end__attach(skb_pkt_end_skel); + if (CHECK(err, "skb_pkt_end_attach", "skb_pkt_end attach failed: %d\n", err)) + goto cleanup; + + if (sanity_run(skb_pkt_end_skel->progs.main_prog)) + goto cleanup; + +cleanup: + skb_pkt_end__destroy(skb_pkt_end_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c new file mode 100644 index 000000000..7ddd6615b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. Huawei Technologies Co., Ltd */ +#include <test_progs.h> +#include "strncmp_test.skel.h" + +static int trigger_strncmp(const struct strncmp_test *skel) +{ + int cmp; + + usleep(1); + + cmp = skel->bss->cmp_ret; + if (cmp > 0) + return 1; + if (cmp < 0) + return -1; + return 0; +} + +/* + * Compare str and target after making str[i] != target[i]. + * When exp is -1, make str[i] < target[i] and delta = -1. 
+ */ +static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, + int exp) +{ + size_t nr = sizeof(skel->bss->str); + char *str = skel->bss->str; + int delta = exp; + int got; + size_t i; + + memcpy(str, skel->rodata->target, nr); + for (i = 0; i < nr - 1; i++) { + str[i] += delta; + + got = trigger_strncmp(skel); + ASSERT_EQ(got, exp, name); + + str[i] -= delta; + } +} + +static void test_strncmp_ret(void) +{ + struct strncmp_test *skel; + int err, got; + + skel = strncmp_test__open(); + if (!ASSERT_OK_PTR(skel, "strncmp_test open")) + return; + + bpf_program__set_autoload(skel->progs.do_strncmp, true); + + err = strncmp_test__load(skel); + if (!ASSERT_EQ(err, 0, "strncmp_test load")) + goto out; + + err = strncmp_test__attach(skel); + if (!ASSERT_EQ(err, 0, "strncmp_test attach")) + goto out; + + skel->bss->target_pid = getpid(); + + /* Empty str */ + skel->bss->str[0] = '\0'; + got = trigger_strncmp(skel); + ASSERT_EQ(got, -1, "strncmp: empty str"); + + /* Same string */ + memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str)); + got = trigger_strncmp(skel); + ASSERT_EQ(got, 0, "strncmp: same str"); + + /* Not-null-termainted string */ + memcpy(skel->bss->str, skel->rodata->target, sizeof(skel->bss->str)); + skel->bss->str[sizeof(skel->bss->str) - 1] = 'A'; + got = trigger_strncmp(skel); + ASSERT_EQ(got, 1, "strncmp: not-null-term str"); + + strncmp_full_str_cmp(skel, "strncmp: less than", -1); + strncmp_full_str_cmp(skel, "strncmp: greater than", 1); +out: + strncmp_test__destroy(skel); +} + +static void test_strncmp_bad_not_const_str_size(void) +{ + struct strncmp_test *skel; + int err; + + skel = strncmp_test__open(); + if (!ASSERT_OK_PTR(skel, "strncmp_test open")) + return; + + bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true); + + err = strncmp_test__load(skel); + ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size"); + + strncmp_test__destroy(skel); +} + +static void test_strncmp_bad_writable_target(void) +{ + struct strncmp_test *skel; + int err; + + skel = strncmp_test__open(); + if (!ASSERT_OK_PTR(skel, "strncmp_test open")) + return; + + bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true); + + err = strncmp_test__load(skel); + ASSERT_ERR(err, "strncmp_test load bad_writable_target"); + + strncmp_test__destroy(skel); +} + +static void test_strncmp_bad_not_null_term_target(void) +{ + struct strncmp_test *skel; + int err; + + skel = strncmp_test__open(); + if (!ASSERT_OK_PTR(skel, "strncmp_test open")) + return; + + bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true); + + err = strncmp_test__load(skel); + ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target"); + + strncmp_test__destroy(skel); +} + +void test_test_strncmp(void) +{ + if (test__start_subtest("strncmp_ret")) + test_strncmp_ret(); + if (test__start_subtest("strncmp_bad_not_const_str_size")) + test_strncmp_bad_not_const_str_size(); + if (test__start_subtest("strncmp_bad_writable_target")) + test_strncmp_bad_writable_target(); + if (test__start_subtest("strncmp_bad_not_null_term_target")) + test_strncmp_bad_not_null_term_target(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c new file mode 100644 index 000000000..eea274110 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * End-to-end eBPF tunnel test suite + * The file 
tests BPF network tunnel implementation. + * + * Topology: + * --------- + * root namespace | at_ns0 namespace + * | + * ----------- | ----------- + * | tnl dev | | | tnl dev | (overlay network) + * ----------- | ----------- + * metadata-mode | metadata-mode + * with bpf | with bpf + * | + * ---------- | ---------- + * | veth1 | --------- | veth0 | (underlay network) + * ---------- peer ---------- + * + * + * Device Configuration + * -------------------- + * root namespace with metadata-mode tunnel + BPF + * Device names and addresses: + * veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay) + * IP 2: 172.16.1.20, IPv6: 00::bb (underlay) + * tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) + * + * Namespace at_ns0 with native tunnel + * Device names and addresses: + * veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) + * tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) + * + * + * End-to-end ping packet flow + * --------------------------- + * Most of the tests start by namespace creation, device configuration, + * then ping the underlay and overlay network. When doing 'ping 10.1.1.100' + * from root namespace, the following operations happen: + * 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev. + * 2) Tnl device's egress BPF program is triggered and set the tunnel metadata, + * with local_ip=172.16.1.200, remote_ip=172.16.1.100. BPF program choose + * the primary or secondary ip of veth1 as the local ip of tunnel. The + * choice is made based on the value of bpf map local_ip_map. + * 3) Outer tunnel header is prepended and route the packet to veth1's egress. + * 4) veth0's ingress queue receive the tunneled packet at namespace at_ns0. + * 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet. + * 6) Forward the packet to the overlay tnl dev. + */ + +#include <arpa/inet.h> +#include <linux/if_tun.h> +#include <linux/limits.h> +#include <linux/sysctl.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h> +#include <net/if.h> +#include <stdbool.h> +#include <stdio.h> +#include <sys/stat.h> +#include <unistd.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "test_tunnel_kern.skel.h" + +#define IP4_ADDR_VETH0 "172.16.1.100" +#define IP4_ADDR1_VETH1 "172.16.1.200" +#define IP4_ADDR2_VETH1 "172.16.1.20" +#define IP4_ADDR_TUNL_DEV0 "10.1.1.100" +#define IP4_ADDR_TUNL_DEV1 "10.1.1.200" + +#define IP6_ADDR_VETH0 "::11" +#define IP6_ADDR1_VETH1 "::22" +#define IP6_ADDR2_VETH1 "::bb" + +#define IP4_ADDR1_HEX_VETH1 0xac1001c8 +#define IP4_ADDR2_HEX_VETH1 0xac100114 +#define IP6_ADDR1_HEX_VETH1 0x22 +#define IP6_ADDR2_HEX_VETH1 0xbb + +#define MAC_TUNL_DEV0 "52:54:00:d9:01:00" +#define MAC_TUNL_DEV1 "52:54:00:d9:02:00" +#define MAC_VETH1 "52:54:00:d9:03:00" + +#define VXLAN_TUNL_DEV0 "vxlan00" +#define VXLAN_TUNL_DEV1 "vxlan11" +#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00" +#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11" + +#define PING_ARGS "-i 0.01 -c 3 -w 10 -q" + +#define SYS(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +#define SYS_NOFAIL(fmt, ...) 
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + system(cmd); \ + }) + +static int config_device(void) +{ + SYS("ip netns add at_ns0"); + SYS("ip link add veth0 address " MAC_VETH1 " type veth peer name veth1"); + SYS("ip link set veth0 netns at_ns0"); + SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1"); + SYS("ip link set dev veth1 up mtu 1500"); + SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0"); + SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500"); + + return 0; +fail: + return -1; +} + +static void cleanup(void) +{ + SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0"); + SYS_NOFAIL("ip link del veth1 2> /dev/null"); + SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1); + SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1); +} + +static int add_vxlan_tunnel(void) +{ + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0", + IP4_ADDR2_VETH1, MAC_VETH1); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV1); + SYS("ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1); +} + +static int add_ip6vxlan_tunnel(void) +{ + SYS("ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS("ip netns exec at_ns0 ip link set dev veth0 up"); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS("ip link set dev veth1 up"); + + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_ip6vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + IP6VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1); +} + +static int test_ping(int family, const char *addr) +{ + SYS("%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); + return 0; +fail: + return -1; +} + +static int 
attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, + .priority = 1, .prog_fd = igr_fd); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, + .priority = 1, .prog_fd = egr_fd); + int ret; + + ret = bpf_tc_hook_create(hook); + if (!ASSERT_OK(ret, "create tc hook")) + return ret; + + if (igr_fd >= 0) { + hook->attach_point = BPF_TC_INGRESS; + ret = bpf_tc_attach(hook, &opts1); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + if (egr_fd >= 0) { + hook->attach_point = BPF_TC_EGRESS; + ret = bpf_tc_attach(hook, &opts2); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + return 0; +} + +static void test_vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_vxlan_tunnel(); + if (!ASSERT_OK(err, "add vxlan tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src); + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach bpf prog to veth dev tc hook point */ + ifindex = if_nametoindex("veth1"); + if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd")) + goto done; + local_ip = IP4_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete vxlan tunnel */ + delete_vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +static void test_ip6vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int 
local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_ip6vxlan_tunnel(); + if (!ASSERT_OK(err, "add_ip6vxlan_tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src); + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd")) + goto done; + local_ip = IP6_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete ipv6 vxlan tunnel */ + delete_ip6vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +#define RUN_TEST(name) \ + ({ \ + if (test__start_subtest(#name)) { \ + test_ ## name(); \ + } \ + }) + +static void *test_tunnel_run_tests(void *arg) +{ + cleanup(); + config_device(); + + RUN_TEST(vxlan_tunnel); + RUN_TEST(ip6vxlan_tunnel); + + cleanup(); + + return NULL; +} + +void serial_test_tunnel(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/time_tai.c b/tools/testing/selftests/bpf/prog_tests/time_tai.c new file mode 100644 index 000000000..f45af1b0e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/time_tai.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022 Linutronix GmbH */ + +#include <test_progs.h> +#include <network_helpers.h> + +#include "test_time_tai.skel.h" + +#include <time.h> +#include <stdint.h> + +#define TAI_THRESHOLD 1000000000ULL /* 1s */ +#define NSEC_PER_SEC 1000000000ULL + +static __u64 ts_to_ns(const struct timespec *ts) +{ + return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec; +} + +void test_time_tai(void) +{ + struct __sk_buff skb = { + .cb[0] = 0, + .cb[1] = 0, + .tstamp = 0, + }; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + .ctx_out = &skb, + .ctx_size_out = sizeof(skb), + ); + struct test_time_tai *skel; + struct timespec now_tai; + __u64 ts1, ts2, now; + int ret, prog_fd; + + /* Open and load */ + skel = test_time_tai__open_and_load(); + if (!ASSERT_OK_PTR(skel, "tai_open")) + return; + + /* Run test program */ + prog_fd = bpf_program__fd(skel->progs.time_tai); + ret = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(ret, "test_run"); + + /* Retrieve generated TAI timestamps */ + ts1 = skb.tstamp; + ts2 = skb.cb[0] | ((__u64)skb.cb[1] << 32); + + /* TAI != 0 */ + ASSERT_NEQ(ts1, 0, "tai_ts1"); + ASSERT_NEQ(ts2, 0, "tai_ts2"); + + /* TAI is moving forward only */ + ASSERT_GE(ts2, ts1, "tai_forward"); + + /* Check for future */ + ret = clock_gettime(CLOCK_TAI, &now_tai); + ASSERT_EQ(ret, 0, "tai_gettime"); + now = ts_to_ns(&now_tai); + + ASSERT_TRUE(now > ts1, "tai_future_ts1"); + ASSERT_TRUE(now > ts2, "tai_future_ts2"); + + /* Check for reasonable range */ + ASSERT_TRUE(now - ts1 < TAI_THRESHOLD, "tai_range_ts1"); + ASSERT_TRUE(now - ts2 < TAI_THRESHOLD, "tai_range_ts2"); + + test_time_tai__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c new file mode 100644 index 000000000..7eb049214 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "timer.skel.h" + +static int timer(struct timer *timer_skel) +{ + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + err = timer__attach(timer_skel); + if (!ASSERT_OK(err, "timer_attach")) + return err; + + ASSERT_EQ(timer_skel->data->callback_check, 52, "callback_check1"); + ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1"); + + prog_fd = bpf_program__fd(timer_skel->progs.test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + timer__detach(timer_skel); + + usleep(50); /* 10 usecs should be enough, but give it extra */ + /* check that timer_cb1() was executed 10+10 times */ + ASSERT_EQ(timer_skel->data->callback_check, 42, "callback_check2"); + ASSERT_EQ(timer_skel->data->callback2_check, 42, "callback2_check2"); + + /* check that timer_cb2() was executed twice */ + ASSERT_EQ(timer_skel->bss->bss_data, 10, 
"bss_data"); + + /* check that there were no errors in timer execution */ + ASSERT_EQ(timer_skel->bss->err, 0, "err"); + + /* check that code paths completed */ + ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok"); + + return 0; +} + +/* TODO: use pid filtering */ +void serial_test_timer(void) +{ + struct timer *timer_skel = NULL; + int err; + + timer_skel = timer__open_and_load(); + if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load")) + goto cleanup; + + err = timer(timer_skel); + ASSERT_OK(err, "timer"); +cleanup: + timer__destroy(timer_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c new file mode 100644 index 000000000..f74b82305 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "timer_crash.skel.h" + +enum { + MODE_ARRAY, + MODE_HASH, +}; + +static void test_timer_crash_mode(int mode) +{ + struct timer_crash *skel; + + skel = timer_crash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load")) + return; + skel->bss->pid = getpid(); + skel->bss->crash_map = mode; + if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach")) + goto end; + usleep(1); +end: + timer_crash__destroy(skel); +} + +void test_timer_crash(void) +{ + if (test__start_subtest("array")) + test_timer_crash_mode(MODE_ARRAY); + if (test__start_subtest("hash")) + test_timer_crash_mode(MODE_HASH); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c new file mode 100644 index 000000000..9ff784390 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ +#include <test_progs.h> +#include "timer_mim.skel.h" +#include "timer_mim_reject.skel.h" + +static int timer_mim(struct timer_mim *timer_skel) +{ + __u64 cnt1, cnt2; + int err, prog_fd, key1 = 1; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + err = timer_mim__attach(timer_skel); + if (!ASSERT_OK(err, "timer_attach")) + return err; + + prog_fd = bpf_program__fd(timer_skel->progs.test1); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + timer_mim__detach(timer_skel); + + /* check that timer_cb[12] are incrementing 'cnt' */ + cnt1 = READ_ONCE(timer_skel->bss->cnt); + for (int i = 0; i < 100; i++) { + cnt2 = READ_ONCE(timer_skel->bss->cnt); + if (cnt2 != cnt1) + break; + usleep(200); /* 100 times more than interval */ + } + ASSERT_GT(cnt2, cnt1, "cnt"); + + ASSERT_EQ(timer_skel->bss->err, 0, "err"); + /* check that code paths completed */ + ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok"); + + close(bpf_map__fd(timer_skel->maps.inner_htab)); + err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0); + ASSERT_EQ(err, 0, "delete inner map"); + + /* check that timer_cb[12] are no longer running */ + cnt1 = READ_ONCE(timer_skel->bss->cnt); + for (int i = 0; i < 100; i++) { + usleep(200); /* 100 times more than interval */ + cnt2 = READ_ONCE(timer_skel->bss->cnt); + if (cnt2 == cnt1) + break; + } + ASSERT_EQ(cnt2, cnt1, "cnt"); + + return 0; +} + +void serial_test_timer_mim(void) +{ + struct timer_mim_reject *timer_reject_skel = NULL; + libbpf_print_fn_t old_print_fn = NULL; + struct timer_mim *timer_skel = NULL; + int err; + + old_print_fn = libbpf_set_print(NULL); + timer_reject_skel = 
timer_mim_reject__open_and_load(); + libbpf_set_print(old_print_fn); + if (!ASSERT_ERR_PTR(timer_reject_skel, "timer_reject_skel_load")) + goto cleanup; + + timer_skel = timer_mim__open_and_load(); + if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load")) + goto cleanup; + + err = timer_mim(timer_skel); + ASSERT_OK(err, "timer_mim"); +cleanup: + timer_mim__destroy(timer_skel); + timer_mim_reject__destroy(timer_reject_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c new file mode 100644 index 000000000..a47908053 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void serial_test_tp_attach_query(void) +{ + const int num_progs = 3; + int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs]; + __u32 duration = 0, info_len, saved_prog_ids[num_progs]; + const char *file = "./test_tracepoint.bpf.o"; + struct perf_event_query_bpf *query; + struct perf_event_attr attr = {}; + struct bpf_object *obj[num_progs]; + struct bpf_prog_info prog_info; + char buf[256]; + + for (i = 0; i < num_progs; i++) + obj[i] = NULL; + + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + return; + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), + "read", "bytes %d errno %d\n", bytes, errno)) + return; + + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + + query = malloc(sizeof(*query) + sizeof(__u32) * num_progs); + for (i = 0; i < num_progs; i++) { + err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i], + &prog_fd[i]); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + goto cleanup1; + + bzero(&prog_info, sizeof(prog_info)); + prog_info.jited_prog_len = 0; + prog_info.xlated_prog_len = 0; + prog_info.nr_map_ids = 0; + info_len = sizeof(prog_info); + err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len); + if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n", + err, errno)) + goto cleanup1; + saved_prog_ids[i] = prog_info.id; + + pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd[i], errno)) + goto cleanup2; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", + err, errno)) + goto cleanup3; + + if (i == 0) { + /* check NULL prog array query */ + query->ids_len = num_progs; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != 0, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + } + + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", + err, errno)) + goto cleanup3; + + if (i == 1) { + /* try to get # of programs only */ + query->ids_len = 0; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != 2, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + + /* try a 
few negative tests */ + /* invalid query pointer */ + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, + (struct perf_event_query_bpf *)0x1); + if (CHECK(!err || errno != EFAULT, + "perf_event_ioc_query_bpf", + "err %d errno %d\n", err, errno)) + goto cleanup3; + + /* no enough space */ + query->ids_len = 1; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2, + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + } + + query->ids_len = num_progs; + err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query); + if (CHECK(err || query->prog_cnt != (i + 1), + "perf_event_ioc_query_bpf", + "err %d errno %d query->prog_cnt %u\n", + err, errno, query->prog_cnt)) + goto cleanup3; + for (j = 0; j < i + 1; j++) + if (CHECK(saved_prog_ids[j] != query->ids[j], + "perf_event_ioc_query_bpf", + "#%d saved_prog_id %x query prog_id %x\n", + j, saved_prog_ids[j], query->ids[j])) + goto cleanup3; + } + + i = num_progs - 1; + for (; i >= 0; i--) { + cleanup3: + ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE); + cleanup2: + close(pmu_fd[i]); + cleanup1: + bpf_object__close(obj[i]); + } + free(query); +} diff --git a/tools/testing/selftests/bpf/prog_tests/trace_ext.c b/tools/testing/selftests/bpf/prog_tests/trace_ext.c new file mode 100644 index 000000000..aabdff7be --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_ext.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include <test_progs.h> +#include <network_helpers.h> +#include <sys/stat.h> +#include <linux/sched.h> +#include <sys/syscall.h> + +#include "test_pkt_md_access.skel.h" +#include "test_trace_ext.skel.h" +#include "test_trace_ext_tracing.skel.h" + +static __u32 duration; + +void test_trace_ext(void) +{ + struct test_pkt_md_access *skel_pkt = NULL; + struct test_trace_ext_tracing *skel_trace = NULL; + struct test_trace_ext_tracing__bss *bss_trace; + struct test_trace_ext *skel_ext = NULL; + struct test_trace_ext__bss *bss_ext; + int err, pkt_fd, ext_fd; + struct bpf_program *prog; + char buf[100]; + __u64 len; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + /* open/load/attach test_pkt_md_access */ + skel_pkt = test_pkt_md_access__open_and_load(); + if (CHECK(!skel_pkt, "setup", "classifier/test_pkt_md_access open failed\n")) + goto cleanup; + + err = test_pkt_md_access__attach(skel_pkt); + if (CHECK(err, "setup", "classifier/test_pkt_md_access attach failed: %d\n", err)) + goto cleanup; + + prog = skel_pkt->progs.test_pkt_md_access; + pkt_fd = bpf_program__fd(prog); + + /* open extension */ + skel_ext = test_trace_ext__open(); + if (CHECK(!skel_ext, "setup", "freplace/test_pkt_md_access open failed\n")) + goto cleanup; + + /* set extension's attach target - test_pkt_md_access */ + prog = skel_ext->progs.test_pkt_md_access_new; + bpf_program__set_attach_target(prog, pkt_fd, "test_pkt_md_access"); + + /* load/attach extension */ + err = test_trace_ext__load(skel_ext); + if (CHECK(err, "setup", "freplace/test_pkt_md_access load failed\n")) { + libbpf_strerror(err, buf, sizeof(buf)); + fprintf(stderr, "%s\n", buf); + goto cleanup; + } + + err = test_trace_ext__attach(skel_ext); + if (CHECK(err, "setup", "freplace/test_pkt_md_access attach failed: %d\n", err)) + goto cleanup; + + prog = skel_ext->progs.test_pkt_md_access_new; + ext_fd = bpf_program__fd(prog); + + /* open tracing */ + skel_trace = 
test_trace_ext_tracing__open(); + if (CHECK(!skel_trace, "setup", "tracing/test_pkt_md_access_new open failed\n")) + goto cleanup; + + /* set tracing's attach target - fentry */ + prog = skel_trace->progs.fentry; + bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new"); + + /* set tracing's attach target - fexit */ + prog = skel_trace->progs.fexit; + bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new"); + + /* load/attach tracing */ + err = test_trace_ext_tracing__load(skel_trace); + if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new load")) { + libbpf_strerror(err, buf, sizeof(buf)); + fprintf(stderr, "%s\n", buf); + goto cleanup; + } + + err = test_trace_ext_tracing__attach(skel_trace); + if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new attach")) + goto cleanup; + + /* trigger the test */ + err = bpf_prog_test_run_opts(pkt_fd, &topts); + ASSERT_OK(err, "test_run_opts err"); + ASSERT_OK(topts.retval, "test_run_opts retval"); + + bss_ext = skel_ext->bss; + bss_trace = skel_trace->bss; + + len = bss_ext->ext_called; + + ASSERT_NEQ(bss_ext->ext_called, 0, + "failed to trigger freplace/test_pkt_md_access"); + ASSERT_EQ(bss_trace->fentry_called, len, + "failed to trigger fentry/test_pkt_md_access_new"); + ASSERT_EQ(bss_trace->fexit_called, len, + "failed to trigger fexit/test_pkt_md_access_new"); + +cleanup: + test_trace_ext_tracing__destroy(skel_trace); + test_trace_ext__destroy(skel_ext); + test_pkt_md_access__destroy(skel_pkt); +} diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c new file mode 100644 index 000000000..cade7f123 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Oracle and/or its affiliates. */ + +#include <test_progs.h> + +#include "trace_printk.lskel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "testing,testing" + +void serial_test_trace_printk(void) +{ + struct trace_printk_lskel__bss *bss; + int err = 0, iter = 0, found = 0; + struct trace_printk_lskel *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_printk_lskel__open(); + if (!ASSERT_OK_PTR(skel, "trace_printk__open")) + return; + + ASSERT_EQ(skel->rodata->fmt[0], 'T', "skel->rodata->fmt[0]"); + skel->rodata->fmt[0] = 't'; + + err = trace_printk_lskel__load(skel); + if (!ASSERT_OK(err, "trace_printk__load")) + goto cleanup; + + bss = skel->bss; + + err = trace_printk_lskel__attach(skel); + if (!ASSERT_OK(err, "trace_printk__attach")) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) + goto cleanup; + + /* We do not want to wait forever if this test fails... 
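+	 * so make reads from the trace_pipe file non-blocking.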
*/ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_printk_lskel__detach(skel); + + if (!ASSERT_GT(bss->trace_printk_ran, 0, "bss->trace_printk_ran")) + goto cleanup; + + if (!ASSERT_GT(bss->trace_printk_ret, 0, "bss->trace_printk_ret")) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_printk_ran) + break; + if (++iter > 1000) + break; + } + + if (!ASSERT_EQ(found, bss->trace_printk_ran, "found")) + goto cleanup; + +cleanup: + trace_printk_lskel__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c new file mode 100644 index 000000000..7a4e313e8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> + +#include "trace_vprintk.lskel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "1,2,3,4,5,6,7,8,9,10" + +void serial_test_trace_vprintk(void) +{ + struct trace_vprintk_lskel__bss *bss; + int err = 0, iter = 0, found = 0; + struct trace_vprintk_lskel *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_vprintk_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) + goto cleanup; + + bss = skel->bss; + + err = trace_vprintk_lskel__attach(skel); + if (!ASSERT_OK(err, "trace_vprintk__attach")) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) + goto cleanup; + + /* We do not want to wait forever if this test fails... */ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_vprintk_lskel__detach(skel); + + if (!ASSERT_GT(bss->trace_vprintk_ran, 0, "bss->trace_vprintk_ran")) + goto cleanup; + + if (!ASSERT_GT(bss->trace_vprintk_ret, 0, "bss->trace_vprintk_ret")) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_vprintk_ran) + break; + if (++iter > 1000) + break; + } + + if (!ASSERT_EQ(found, bss->trace_vprintk_ran, "found")) + goto cleanup; + + if (!ASSERT_LT(bss->null_data_vprintk_ret, 0, "bss->null_data_vprintk_ret")) + goto cleanup; + +cleanup: + trace_vprintk_lskel__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c new file mode 100644 index 000000000..48dc9472e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ + +#include <test_progs.h> +#include "tracing_struct.skel.h" + +static void test_fentry(void) +{ + struct tracing_struct *skel; + int err; + + skel = tracing_struct__open_and_load(); + if (!ASSERT_OK_PTR(skel, "tracing_struct__open_and_load")) + return; + + err = tracing_struct__attach(skel); + if (!ASSERT_OK(err, "tracing_struct__attach")) + goto destroy_skel; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->t1_a_a, 2, "t1:a.a"); + ASSERT_EQ(skel->bss->t1_a_b, 3, "t1:a.b"); + ASSERT_EQ(skel->bss->t1_b, 1, "t1:b"); + ASSERT_EQ(skel->bss->t1_c, 4, "t1:c"); + + ASSERT_EQ(skel->bss->t1_nregs, 4, "t1 nregs"); + ASSERT_EQ(skel->bss->t1_reg0, 2, "t1 reg0"); + ASSERT_EQ(skel->bss->t1_reg1, 3, "t1 reg1"); + ASSERT_EQ(skel->bss->t1_reg2, 1, "t1 reg2"); + ASSERT_EQ(skel->bss->t1_reg3, 4, "t1 reg3"); + ASSERT_EQ(skel->bss->t1_ret, 10, "t1 ret"); + + ASSERT_EQ(skel->bss->t2_a, 1, "t2:a"); + ASSERT_EQ(skel->bss->t2_b_a, 2, "t2:b.a"); + ASSERT_EQ(skel->bss->t2_b_b, 3, "t2:b.b"); + ASSERT_EQ(skel->bss->t2_c, 4, "t2:c"); + ASSERT_EQ(skel->bss->t2_ret, 10, "t2 ret"); + + ASSERT_EQ(skel->bss->t3_a, 1, "t3:a"); + ASSERT_EQ(skel->bss->t3_b, 4, "t3:b"); + ASSERT_EQ(skel->bss->t3_c_a, 2, "t3:c.a"); + ASSERT_EQ(skel->bss->t3_c_b, 3, "t3:c.b"); + ASSERT_EQ(skel->bss->t3_ret, 10, "t3 ret"); + + ASSERT_EQ(skel->bss->t4_a_a, 10, "t4:a.a"); + ASSERT_EQ(skel->bss->t4_b, 1, "t4:b"); + ASSERT_EQ(skel->bss->t4_c, 2, "t4:c"); + ASSERT_EQ(skel->bss->t4_d, 3, "t4:d"); + ASSERT_EQ(skel->bss->t4_e_a, 2, "t4:e.a"); + ASSERT_EQ(skel->bss->t4_e_b, 3, "t4:e.b"); + ASSERT_EQ(skel->bss->t4_ret, 21, "t4 ret"); + + ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); + + tracing_struct__detach(skel); +destroy_skel: + tracing_struct__destroy(skel); +} + +void test_tracing_struct(void) +{ + test_fentry(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c new file mode 100644 index 000000000..564b75bc0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define _GNU_SOURCE +#include <test_progs.h> + +#define MAX_TRAMP_PROGS 38 + +struct inst { + struct bpf_object *obj; + struct bpf_link *link; +}; + +static struct bpf_program *load_prog(char *file, char *name, struct inst *inst) +{ + struct bpf_object *obj; + struct bpf_program *prog; + int err; + + obj = bpf_object__open_file(file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + return NULL; + + inst->obj = obj; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + return NULL; + + prog = bpf_object__find_program_by_name(obj, name); + if (!ASSERT_OK_PTR(prog, "obj_find_prog")) + return NULL; + + return prog; +} + +/* TODO: use different target function to run in concurrent mode */ +void serial_test_trampoline_count(void) +{ + char *file = "test_trampoline_count.bpf.o"; + char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" }; + struct inst inst[MAX_TRAMP_PROGS + 1] = {}; + struct bpf_program *prog; + struct bpf_link *link; + int prog_fd, err, i; + LIBBPF_OPTS(bpf_test_run_opts, opts); + + /* attach 'allowed' trampoline programs */ + for (i = 0; i < MAX_TRAMP_PROGS; i++) { + prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]); + if (!prog) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attach_prog")) + goto cleanup; + + inst[i].link = link; + } + + /* and try 1 extra.. 
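+	 * attachment, one past the MAX_TRAMP_PROGS limit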
*/ + prog = load_prog(file, "fmod_ret_test", &inst[i]); + if (!prog) + goto cleanup; + + /* ..that needs to fail */ + link = bpf_program__attach(prog); + if (!ASSERT_ERR_PTR(link, "attach_prog")) { + inst[i].link = link; + goto cleanup; + } + + /* with E2BIG error */ + if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG")) + goto cleanup; + if (!ASSERT_EQ(link, NULL, "ptr_is_null")) + goto cleanup; + + /* and finaly execute the probe */ + prog_fd = bpf_program__fd(prog); + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) + goto cleanup; + + err = bpf_prog_test_run_opts(prog_fd, &opts); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto cleanup; + + ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result"); + ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect"); + +cleanup: + for (; i >= 0; i--) { + bpf_link__destroy(inst[i].link); + bpf_object__close(inst[i].obj); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c new file mode 100644 index 000000000..2643d896d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "udp_limit.skel.h" + +#include <sys/types.h> +#include <sys/socket.h> + +void test_udp_limit(void) +{ + struct udp_limit *skel; + int fd1 = -1, fd2 = -1; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/udp_limit"); + if (!ASSERT_GE(cgroup_fd, 0, "cg-join")) + return; + + skel = udp_limit__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel-load")) + goto close_cgroup_fd; + + skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.sock, "cg_attach_sock")) + goto close_skeleton; + skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.sock_release, "cg_attach_sock_release")) + goto close_skeleton; + + /* BPF program enforces a single UDP socket per cgroup, + * verify that. + */ + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (!ASSERT_GE(fd1, 0, "socket(fd1)")) + goto close_skeleton; + + fd2 = socket(AF_INET, SOCK_DGRAM, 0); + if (!ASSERT_LT(fd2, 0, "socket(fd2)")) + goto close_skeleton; + + /* We can reopen again after close. */ + close(fd1); + fd1 = -1; + + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (!ASSERT_GE(fd1, 0, "socket(fd1-again)")) + goto close_skeleton; + + /* Make sure the program was invoked the expected + * number of times: + * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE + * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE + * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE + * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE + */ + if (!ASSERT_EQ(skel->bss->invocations, 4, "bss-invocations")) + goto close_skeleton; + + /* We should still have a single socket in use */ + if (!ASSERT_EQ(skel->bss->in_use, 1, "bss-in_use")) + goto close_skeleton; + +close_skeleton: + if (fd1 >= 0) + close(fd1); + if (fd2 >= 0) + close(fd2); + udp_limit__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c new file mode 100644 index 000000000..1ed3cc209 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. 
*/ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_unpriv_bpf_disabled.skel.h" + +#include "cap_helpers.h" + +/* Using CAP_LAST_CAP is risky here, since it can get pulled in from + * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result + * CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as + * we know its value is correct since it is explicitly defined in + * cap_helpers.h. + */ +#define ALL_CAPS ((2ULL << CAP_BPF) - 1) + +#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_" +#define NUM_MAPS 7 + +static __u32 got_perfbuf_val; +static __u32 got_ringbuf_val; + +static int process_ringbuf(void *ctx, void *data, size_t len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid")) + got_ringbuf_val = *(__u32 *)data; + return 0; +} + +static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid")) + got_perfbuf_val = *(__u32 *)data; +} + +static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val) +{ + int ret = 0; + FILE *fp; + + fp = fopen(sysctl_path, "r+"); + if (!fp) + return -errno; + if (old_val && fscanf(fp, "%s", old_val) <= 0) { + ret = -ENOENT; + } else if (!old_val || strcmp(old_val, new_val) != 0) { + fseek(fp, 0, SEEK_SET); + if (fprintf(fp, "%s", new_val) < 0) + ret = -errno; + } + fclose(fp); + + return ret; +} + +static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + struct perf_buffer *perfbuf = NULL; + struct ring_buffer *ringbuf = NULL; + int i, nr_cpus, link_fd = -1; + + nr_cpus = bpf_num_possible_cpus(); + + skel->bss->perfbuf_val = 1; + skel->bss->ringbuf_val = 2; + + /* Positive tests for unprivileged BPF disabled. 
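+	 * Everything used below was set up while the process still had its capabilities.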
Verify we can + * - retrieve and interact with pinned maps; + * - set up and interact with perf buffer; + * - set up and interact with ring buffer; + * - create a link + */ + perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL, + NULL); + if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new")) + goto cleanup; + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL); + if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new")) + goto cleanup; + + /* trigger & validate perf event, ringbuf output */ + usleep(1); + + ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll"); + ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val"); + ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume"); + ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val"); + + for (i = 0; i < NUM_MAPS; i++) { + map_fds[i] = bpf_obj_get(map_paths[i]); + if (!ASSERT_GT(map_fds[i], -1, "obj_get")) + goto cleanup; + } + + for (i = 0; i < NUM_MAPS; i++) { + bool prog_array = strstr(map_paths[i], "prog_array") != NULL; + bool array = strstr(map_paths[i], "array") != NULL; + bool buf = strstr(map_paths[i], "buf") != NULL; + __u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus]; + __u32 expected_val = 1; + int j; + + /* skip ringbuf, perfbuf */ + if (buf) + continue; + + for (j = 0; j < nr_cpus; j++) + vals[j] = expected_val; + + if (prog_array) { + /* need valid prog array value */ + vals[0] = prog_fd; + /* prog array lookup returns prog id, not fd */ + expected_val = prog_id; + } + ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem"); + ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem"); + ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values"); + if (!array) + ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem"); + } + + link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd, + BPF_PERF_EVENT, NULL); + ASSERT_GT(link_fd, 0, "link_create"); + +cleanup: + if (link_fd) + close(link_fd); + if (perfbuf) + perf_buffer__free(perfbuf); + if (ringbuf) + ring_buffer__free(ringbuf); +} + +static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + const struct bpf_insn prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, load_opts); + struct bpf_map_info map_info = {}; + __u32 map_info_len = sizeof(map_info); + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + struct btf *btf = NULL; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 next; + int i; + + /* Negative tests for unprivileged BPF disabled. 
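+	 * With the sysctl set and all capabilities dropped, each of these must fail with -EPERM.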
Verify we cannot + * - load BPF programs; + * - create BPF maps; + * - get a prog/map/link fd by id; + * - get next prog/map/link id + * - query prog + * - BTF load + */ + ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL", + prog_insns, prog_insn_cnt, &load_opts), + -EPERM, "prog_load_fails"); + + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), + -EPERM, "map_create_fails"); + + ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails"); + ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM, + "map_get_next_id_fails"); + } + ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter), + &link_info, &link_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails"); + ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM, + "link_get_next_id_fails"); + } + ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails"); + + ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids, + &prog_cnt), -EPERM, "prog_query_fails"); + + btf = btf__new_empty(); + if (ASSERT_OK_PTR(btf, "empty_btf") && + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) { + const void *raw_btf_data; + __u32 raw_btf_size; + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM, + "bpf_btf_load_fails"); + } + btf__free(btf); +} + +void test_unpriv_bpf_disabled(void) +{ + char *map_paths[NUM_MAPS] = { PINPATH "array", + PINPATH "percpu_array", + PINPATH "hash", + PINPATH "percpu_hash", + PINPATH "perfbuf", + PINPATH "ringbuf", + PINPATH "prog_array" }; + int map_fds[NUM_MAPS]; + struct test_unpriv_bpf_disabled *skel; + char unprivileged_bpf_disabled_orig[32] = {}; + char perf_event_paranoid_orig[32] = {}; + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + struct perf_event_attr attr = {}; + int prog_fd, perf_fd = -1, i, ret; + __u64 save_caps = 0; + __u32 prog_id; + + skel = test_unpriv_bpf_disabled__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->test_pid = getpid(); + + map_fds[0] = bpf_map__fd(skel->maps.array); + map_fds[1] = bpf_map__fd(skel->maps.percpu_array); + map_fds[2] = bpf_map__fd(skel->maps.hash); + map_fds[3] = bpf_map__fd(skel->maps.percpu_hash); + map_fds[4] = bpf_map__fd(skel->maps.perfbuf); + map_fds[5] = bpf_map__fd(skel->maps.ringbuf); + map_fds[6] = bpf_map__fd(skel->maps.prog_array); + + for (i = 0; i < NUM_MAPS; i++) + ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd"); + + /* allow user without caps to use perf events */ + if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig, + "-1"), + "set_perf_event_paranoid")) + goto cleanup; + /* ensure unprivileged bpf disabled is set */ + ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", + 
unprivileged_bpf_disabled_orig, "2"); + if (ret == -EPERM) { + /* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */ + if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"), + "unprivileged_bpf_disabled_on")) + goto cleanup; + } else { + if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled")) + goto cleanup; + } + + prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter); + ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len), + "obj_get_info_by_fd"); + prog_id = prog_info.id; + ASSERT_GT(prog_id, 0, "valid_prog_id"); + + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (!ASSERT_GE(perf_fd, 0, "perf_fd")) + goto cleanup; + + if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach")) + goto cleanup; + + if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps")) + goto cleanup; + + if (test__start_subtest("unpriv_bpf_disabled_positive")) + test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + + if (test__start_subtest("unpriv_bpf_disabled_negative")) + test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + +cleanup: + close(perf_fd); + if (save_caps) + cap_enable_effective(save_caps, NULL); + if (strlen(perf_event_paranoid_orig) > 0) + sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig); + if (strlen(unprivileged_bpf_disabled_orig) > 0) + sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL, + unprivileged_bpf_disabled_orig); + for (i = 0; i < NUM_MAPS; i++) + unlink(map_paths[i]); + test_unpriv_bpf_disabled__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c new file mode 100644 index 000000000..35b87c7ba --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. 
*/ + +#include <test_progs.h> +#include "test_uprobe_autoattach.skel.h" + +/* uprobe attach point */ +static noinline int autoattach_trigger_func(int arg) +{ + asm volatile (""); + return arg + 1; +} + +void test_uprobe_autoattach(void) +{ + struct test_uprobe_autoattach *skel; + int trigger_val = 100, trigger_ret; + size_t malloc_sz = 1; + char *mem; + + skel = test_uprobe_autoattach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach")) + goto cleanup; + + skel->bss->test_pid = getpid(); + + /* trigger & validate uprobe & uretprobe */ + trigger_ret = autoattach_trigger_func(trigger_val); + + skel->bss->test_pid = getpid(); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(malloc_sz); + + ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran"); + ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran"); + + free(mem); +cleanup: + test_uprobe_autoattach__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c new file mode 100644 index 000000000..9ad9da0f2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> + +#define _SDT_HAS_SEMAPHORES 1 +#include "../sdt.h" + +#include "test_usdt.skel.h" +#include "test_urandom_usdt.skel.h" + +int lets_test_this(int); + +static volatile int idx = 2; +static volatile __u64 bla = 0xFEDCBA9876543210ULL; +static volatile short nums[] = {-1, -2, -3, -4}; + +static volatile struct { + int x; + signed char y; +} t1 = { 1, -127 }; + +#define SEC(name) __attribute__((section(name), used)) + +unsigned short test_usdt0_semaphore SEC(".probes"); +unsigned short test_usdt3_semaphore SEC(".probes"); +unsigned short test_usdt12_semaphore SEC(".probes"); + +static void __always_inline trigger_func(int x) { + long y = 42; + + if (test_usdt0_semaphore) + STAP_PROBE(test, usdt0); + if (test_usdt3_semaphore) + STAP_PROBE3(test, usdt3, x, y, &bla); + if (test_usdt12_semaphore) { + STAP_PROBE12(test, usdt12, + x, x + 1, y, x + y, 5, + y / 7, bla, &bla, -9, nums[x], + nums[idx], t1.y); + } +} + +static void subtest_basic_usdt(void) +{ + LIBBPF_OPTS(bpf_usdt_opts, opts); + struct test_usdt *skel; + struct test_usdt__bss *bss; + int err; + + skel = test_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = test_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* usdt0 won't be auto-attached */ + opts.usdt_cookie = 0xcafedeadbeeffeed; + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, + 0 /*self*/, "/proc/self/exe", + "test", "usdt0", &opts); + if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link")) + goto cleanup; + + trigger_func(1); + + ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called"); + ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called"); + ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called"); + + ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie"); + ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt"); + ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret"); + + /* auto-attached usdt3 gets default zero cookie value */ + ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie"); + ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt"); + + ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret"); + ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1"); + ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); + ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + + /* auto-attached usdt12 gets default zero cookie value */ + ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie"); + ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt"); + + ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1"); + ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2"); + ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3"); + ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4"); + ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5"); + ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6"); + ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7"); + ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8"); + ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9"); + ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10"); + ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11"); + ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12"); + + /* trigger_func() is marked __always_inline, so USDT invocations will be + * inlined in two different places, meaning that each USDT will have + * at least 2 different places to be attached to. 
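On the BPF side these probes are handled by SEC("usdt") programs built on libbpf's usdt.bpf.h, which is also what flips the test_usdt*_semaphore counters above to non-zero while a probe is attached. A rough sketch of a consumer for the usdt3 probe, assuming the usual vmlinux.h/bpf_helpers.h includes (a sketch only, not the literal test_usdt.c):

#include <bpf/usdt.bpf.h>

int usdt3_called;
__u64 usdt3_cookie;
int usdt3_arg_cnt;

/* auto-attached by test_usdt__attach() based on the section name */
SEC("usdt//proc/self/exe:test:usdt3")
int BPF_USDT(usdt3, int x, long y, __u64 *z)
{
        long first_arg;

        __sync_fetch_and_add(&usdt3_called, 1);

        usdt3_cookie = bpf_usdt_cookie(ctx);
        usdt3_arg_cnt = bpf_usdt_arg_cnt(ctx);

        /* dynamic variant of the x/y/z parameters above */
        if (bpf_usdt_arg(ctx, 0, &first_arg))
                return 0;       /* -ENOENT once arg_num >= bpf_usdt_arg_cnt() */

        return 0;
}

The manually attached usdt0 program has no arguments at all, which is why the usdt0_arg_ret assertion above expects bpf_usdt_arg() to report -ENOENT.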
This verifies that + * bpf_program__attach_usdt() handles this properly and attaches to + * all possible places of USDT invocation. + */ + trigger_func(2); + + ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called"); + ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called"); + ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called"); + + /* only check values that depend on trigger_func()'s input value */ + ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1"); + + ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1"); + ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2"); + ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4"); + ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10"); + + /* detach and re-attach usdt3 */ + bpf_link__destroy(skel->links.usdt3); + + opts.usdt_cookie = 0xBADC00C51E; + skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */, + "/proc/self/exe", "test", "usdt3", &opts); + if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach")) + goto cleanup; + + trigger_func(3); + + ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called"); + /* this time usdt3 has custom cookie */ + ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie"); + ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt"); + + ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret"); + ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1"); + ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); + ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + +cleanup: + test_usdt__destroy(skel); +} + +unsigned short test_usdt_100_semaphore SEC(".probes"); +unsigned short test_usdt_300_semaphore SEC(".probes"); +unsigned short test_usdt_400_semaphore SEC(".probes"); + +#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \ + F(X+5); F(X+6); F(X+7); F(X+8); F(X+9); +#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \ + R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90); + +/* carefully control that we get exactly 100 inlines by preventing inlining */ +static void __always_inline f100(int x) +{ + STAP_PROBE1(test, usdt_100, x); +} + +__weak void trigger_100_usdts(void) +{ + R100(f100, 0); +} + +/* we shouldn't be able to attach to test:usdt2_300 USDT as we don't have as + * many slots for specs. 
It's important that each STAP_PROBE2() invocation + * (after untolling) gets different arg spec due to compiler inlining i as + * a constant + */ +static void __always_inline f300(int x) +{ + STAP_PROBE1(test, usdt_300, x); +} + +__weak void trigger_300_usdts(void) +{ + R100(f300, 0); + R100(f300, 100); + R100(f300, 200); +} + +static void __always_inline f400(int x __attribute__((unused))) +{ + STAP_PROBE1(test, usdt_400, 400); +} + +/* this time we have 400 different USDT call sites, but they have uniform + * argument location, so libbpf's spec string deduplication logic should keep + * spec count use very small and so we should be able to attach to all 400 + * call sites + */ +__weak void trigger_400_usdts(void) +{ + R100(f400, 0); + R100(f400, 100); + R100(f400, 200); + R100(f400, 300); +} + +static void subtest_multispec_usdt(void) +{ + LIBBPF_OPTS(bpf_usdt_opts, opts); + struct test_usdt *skel; + struct test_usdt__bss *bss; + int err, i; + + skel = test_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = test_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* usdt_100 is auto-attached and there are 100 inlined call sites, + * let's validate that all of them are properly attached to and + * handled from BPF side + */ + trigger_100_usdts(); + + ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called"); + ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum"); + + /* Stress test free spec ID tracking. By default libbpf allows up to + * 256 specs to be used, so if we don't return free spec IDs back + * after few detachments and re-attachments we should run out of + * available spec IDs. + */ + for (i = 0; i < 2; i++) { + bpf_link__destroy(skel->links.usdt_100); + + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, + "/proc/self/exe", + "test", "usdt_100", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach")) + goto cleanup; + + bss->usdt_100_sum = 0; + trigger_100_usdts(); + + ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called"); + ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum"); + } + + /* Now let's step it up and try to attach USDT that requires more than + * 256 attach points with different specs for each. + * Note that we need trigger_300_usdts() only to actually have 300 + * USDT call sites, we are not going to actually trace them. + */ + trigger_300_usdts(); + + /* we'll reuse usdt_100 BPF program for usdt_300 test */ + bpf_link__destroy(skel->links.usdt_100); + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe", + "test", "usdt_300", NULL); + err = -errno; + if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach")) + goto cleanup; + ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err"); + + /* let's check that there are no "dangling" BPF programs attached due + * to partial success of the above test:usdt_300 attachment + */ + bss->usdt_100_called = 0; + bss->usdt_100_sum = 0; + + f300(777); /* this is 301st instance of usdt_300 */ + + ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called"); + ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum"); + + /* This time we have USDT with 400 inlined invocations, but arg specs + * should be the same across all sites, so libbpf will only need to + * use one spec and thus we'll be able to attach 400 uprobes + * successfully. + * + * Again, we are reusing usdt_100 BPF program. 
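The 256-spec budget that makes the usdt_300 attachment fail with -E2BIG lives on the BPF side of libbpf's USDT support: unless the header has changed, usdt.bpf.h sizes its spec map from a BPF_USDT_MAX_SPEC_CNT macro that defaults to 256, so a BPF object that legitimately needs more distinct argument specs can override it before the include, roughly:

/* in the .bpf.c file, before including libbpf's USDT header (illustrative) */
#define BPF_USDT_MAX_SPEC_CNT 512
#include <bpf/usdt.bpf.h>

The test deliberately does not do this; it wants to exercise the out-of-specs error path.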
+ */ + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, + "/proc/self/exe", + "test", "usdt_400", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach")) + goto cleanup; + + trigger_400_usdts(); + + ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called"); + ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum"); + +cleanup: + test_usdt__destroy(skel); +} + +static FILE *urand_spawn(int *pid) +{ + FILE *f; + + /* urandom_read's stdout is wired into f */ + f = popen("./urandom_read 1 report-pid", "r"); + if (!f) + return NULL; + + if (fscanf(f, "%d", pid) != 1) { + pclose(f); + return NULL; + } + + return f; +} + +static int urand_trigger(FILE **urand_pipe) +{ + int exit_code; + + /* pclose() waits for child process to exit and returns their exit code */ + exit_code = pclose(*urand_pipe); + *urand_pipe = NULL; + + return exit_code; +} + +static void subtest_urandom_usdt(bool auto_attach) +{ + struct test_urandom_usdt *skel; + struct test_urandom_usdt__bss *bss; + struct bpf_link *l; + FILE *urand_pipe = NULL; + int err, urand_pid = 0; + + skel = test_urandom_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + urand_pipe = urand_spawn(&urand_pid); + if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn")) + goto cleanup; + + bss = skel->bss; + bss->urand_pid = urand_pid; + + if (auto_attach) { + err = test_urandom_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_auto_attach")) + goto cleanup; + } else { + l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema, + urand_pid, "./urandom_read", + "urand", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_without_sema_attach")) + goto cleanup; + skel->links.urand_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema, + urand_pid, "./urandom_read", + "urand", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_with_sema_attach")) + goto cleanup; + skel->links.urand_read_with_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach")) + goto cleanup; + skel->links.urandlib_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach")) + goto cleanup; + skel->links.urandlib_read_with_sema = l; + + } + + /* trigger urandom_read USDTs */ + ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code"); + + ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt"); + ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum"); + + ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt"); + ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum"); + +cleanup: + if (urand_pipe) + pclose(urand_pipe); + test_urandom_usdt__destroy(skel); +} + +void test_usdt(void) +{ + if (test__start_subtest("basic")) + subtest_basic_usdt(); + if (test__start_subtest("multispec")) + subtest_multispec_usdt(); + if (test__start_subtest("urand_auto_attach")) + 
subtest_urandom_usdt(true /* auto_attach */); + if (test__start_subtest("urand_pid_attach")) + subtest_urandom_usdt(false /* auto_attach */); +} diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c new file mode 100644 index 000000000..02b18d018 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ + +#define _GNU_SOURCE +#include <linux/compiler.h> +#include <linux/ring_buffer.h> +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <sys/sysinfo.h> +#include <test_progs.h> +#include <uapi/linux/bpf.h> +#include <unistd.h> + +#include "user_ringbuf_fail.skel.h" +#include "user_ringbuf_success.skel.h" + +#include "../progs/test_user_ringbuf.h" + +static size_t log_buf_sz = 1 << 20; /* 1 MB */ +static char obj_log_buf[1048576]; +static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ; +static const long c_ringbuf_size = 1 << 12; /* 1 small page */ +static const long c_max_entries = c_ringbuf_size / c_sample_size; + +static void drain_current_samples(void) +{ + syscall(__NR_getpgid); +} + +static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples) +{ + int i, err = 0; + + /* Write some number of samples to the ring buffer. */ + for (i = 0; i < num_samples; i++) { + struct sample *entry; + int read; + + entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry)); + if (!entry) { + err = -errno; + goto done; + } + + entry->pid = getpid(); + entry->seq = i; + entry->value = i * i; + + read = snprintf(entry->comm, sizeof(entry->comm), "%u", i); + if (read <= 0) { + /* Assert on the error path to avoid spamming logs with + * mostly success messages. + */ + ASSERT_GT(read, 0, "snprintf_comm"); + err = read; + user_ring_buffer__discard(ringbuf, entry); + goto done; + } + + user_ring_buffer__submit(ringbuf, entry); + } + +done: + drain_current_samples(); + + return err; +} + +static struct user_ringbuf_success *open_load_ringbuf_skel(void) +{ + struct user_ringbuf_success *skel; + int err; + + skel = user_ringbuf_success__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return NULL; + + err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size); + if (!ASSERT_OK(err, "set_max_entries")) + goto cleanup; + + err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size); + if (!ASSERT_OK(err, "set_max_entries")) + goto cleanup; + + err = user_ringbuf_success__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + return skel; + +cleanup: + user_ringbuf_success__destroy(skel); + return NULL; +} + +static void test_user_ringbuf_mappings(void) +{ + int err, rb_fd; + int page_size = getpagesize(); + void *mmap_ptr; + struct user_ringbuf_success *skel; + + skel = open_load_ringbuf_skel(); + if (!skel) + return; + + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + /* cons_pos can be mapped R/O, can't add +X with mprotect. 
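write_samples() above only produces; the consuming side is a BPF program that calls the bpf_user_ringbuf_drain() helper with a per-sample callback, and drain_current_samples() simply issues the getpgid() syscall that program is attached to. A minimal sketch of such a consumer, assuming a BPF_MAP_TYPE_USER_RINGBUF map plus the 'read' counter the assertions below check (illustrative, not the literal user_ringbuf_success.c):

struct {
        __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
} user_ringbuf SEC(".maps");

long read;

static long read_sample_cb(struct bpf_dynptr *dynptr, void *ctx)
{
        const struct sample *s;

        s = bpf_dynptr_data(dynptr, 0, sizeof(*s));
        if (!s)
                return 1;       /* stop draining */

        __sync_fetch_and_add(&read, 1);
        return 0;               /* keep draining */
}

/* SYS_PREFIX ("__x64_", "__arm64_", ...) comes from the selftests' bpf_misc.h */
SEC("fentry/" SYS_PREFIX "sys_getpgid")
int read_user_ringbuf(void *ctx)
{
        bpf_user_ringbuf_drain(&user_ringbuf, read_sample_cb, NULL, 0);
        return 0;
}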
*/ + mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0); + ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos"); + err = -errno; + ASSERT_ERR(err, "wr_prod_pos_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons"); + + /* prod_pos can be mapped RW, can't add +X with mprotect. */ + mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, + rb_fd, page_size); + ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect"); + err = -errno; + ASSERT_ERR(err, "wr_prod_pos_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod"); + + /* data pages can be mapped RW, can't add +X with mprotect. */ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, + 2 * page_size); + ASSERT_OK_PTR(mmap_ptr, "rw_data"); + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect"); + err = -errno; + ASSERT_ERR(err, "exec_data_err"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data"); + + user_ringbuf_success__destroy(skel); +} + +static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out, + struct ring_buffer **kern_ringbuf_out, + ring_buffer_sample_fn callback, + struct user_ring_buffer **user_ringbuf_out) +{ + struct user_ringbuf_success *skel; + struct ring_buffer *kern_ringbuf = NULL; + struct user_ring_buffer *user_ringbuf = NULL; + int err = -ENOMEM, rb_fd; + + skel = open_load_ringbuf_skel(); + if (!skel) + return err; + + /* only trigger BPF program for current process */ + skel->bss->pid = getpid(); + + if (kern_ringbuf_out) { + rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf); + kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL); + if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create")) + goto cleanup; + + *kern_ringbuf_out = kern_ringbuf; + } + + if (user_ringbuf_out) { + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + user_ringbuf = user_ring_buffer__new(rb_fd, NULL); + if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create")) + goto cleanup; + + *user_ringbuf_out = user_ringbuf; + ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load"); + } + + err = user_ringbuf_success__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + *skel_out = skel; + return 0; + +cleanup: + if (kern_ringbuf_out) + *kern_ringbuf_out = NULL; + if (user_ringbuf_out) + *user_ringbuf_out = NULL; + ring_buffer__free(kern_ringbuf); + user_ring_buffer__free(user_ringbuf); + user_ringbuf_success__destroy(skel); + return err; +} + +static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out, + struct user_ring_buffer **ringbuf_out) +{ + return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out); +} + +static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel, + __u32 size, __u64 producer_pos, int err) +{ + void *data_ptr; + __u64 *producer_pos_ptr; + int rb_fd, page_size = getpagesize(); + + rb_fd = bpf_map__fd(skel->maps.user_ringbuf); + + ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample"); + + /* Map the producer_pos as RW. */ + producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, + MAP_SHARED, rb_fd, page_size); + ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr"); + + /* Map the data pages as RW. 
*/ + data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + ASSERT_OK_PTR(data_ptr, "rw_data"); + + memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ); + *(__u32 *)data_ptr = size; + + /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */ + smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ); + + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample"); + ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample"); + + ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos"); + ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr"); +} + +static void test_user_ringbuf_post_misaligned(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = (1 << 5) + 7; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "misaligned_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size, -EINVAL); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_producer_wrong_offset(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = (1 << 5); + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "wrong_offset_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_larger_than_ringbuf_sz(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + __u32 size = c_ringbuf_size; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "huge_sample_skel")) + return; + + manually_write_test_invalid_sample(skel, size, size, -E2BIG); + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_basic(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_basic_skel")) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + err = write_samples(ringbuf, 2); + if (!ASSERT_OK(err, "write_samples")) + goto cleanup; + + ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_sample_full_ring_buffer(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + void *sample; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_full_sample_skel")) + return; + + sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ); + if (!ASSERT_OK_PTR(sample, "full_sample")) + goto cleanup; + + user_ring_buffer__submit(ringbuf, sample); + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_post_alignment_autoadjust(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + struct sample *sample; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel")) + return; + + /* libbpf should automatically 
round any sample up to an 8-byte alignment. */ + sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1); + ASSERT_OK_PTR(sample, "reserve_autoaligned"); + user_ring_buffer__submit(ringbuf, sample); + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after"); + + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_overfill(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + err = write_samples(ringbuf, c_max_entries * 5); + ASSERT_ERR(err, "write_samples"); + ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries"); + + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_discards_properly_ignored(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err, num_discarded = 0; + __u64 *token; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + while (1) { + /* Write samples until the buffer is full. */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + if (!token) + break; + + user_ring_buffer__discard(ringbuf, token); + num_discarded++; + } + + if (!ASSERT_GE(num_discarded, 0, "num_discarded")) + goto cleanup; + + /* Should not read any samples, as they are all discarded. */ + ASSERT_EQ(skel->bss->read, 0, "num_pre_kick"); + drain_current_samples(); + ASSERT_EQ(skel->bss->read, 0, "num_post_kick"); + + /* Now that the ring buffer has been drained, we should be able to + * reserve another token. + */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + + if (!ASSERT_OK_PTR(token, "new_token")) + goto cleanup; + + user_ring_buffer__discard(ringbuf, token); +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void test_user_ringbuf_loop(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + uint32_t total_samples = 8192; + uint32_t remaining_samples = total_samples; + int err; + + BUILD_BUG_ON(total_samples <= c_max_entries); + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + do { + uint32_t curr_samples; + + curr_samples = remaining_samples > c_max_entries + ? c_max_entries : remaining_samples; + err = write_samples(ringbuf, curr_samples); + if (err != 0) { + /* Assert inside of if statement to avoid flooding logs + * on the success path. + */ + ASSERT_OK(err, "write_samples"); + goto cleanup; + } + + remaining_samples -= curr_samples; + ASSERT_EQ(skel->bss->read, total_samples - remaining_samples, + "current_batched_entries"); + } while (remaining_samples > 0); + ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries"); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static int send_test_message(struct user_ring_buffer *ringbuf, + enum test_msg_op op, s64 operand_64, + s32 operand_32) +{ + struct test_msg *msg; + + msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg)); + if (!msg) { + /* Assert on the error path to avoid spamming logs with mostly + * success messages. 
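One detail worth noting for the overfill and discard tests above: per the libbpf documentation, user_ring_buffer__reserve() reports failure by returning NULL and setting errno (ENOSPC once the ring is full, E2BIG for a sample that can never fit), which is what write_samples() turns into its negative return value. A caller that would rather wait than fail can fall back to the blocking variant, roughly:

sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample));
if (!sample && errno == ENOSPC) {
        /* ring full: wait up to 1s for the kernel to drain some samples */
        sample = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*sample), 1000);
}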
+ */ + ASSERT_OK_PTR(msg, "reserve_msg"); + return -ENOMEM; + } + + msg->msg_op = op; + + switch (op) { + case TEST_MSG_OP_INC64: + case TEST_MSG_OP_MUL64: + msg->operand_64 = operand_64; + break; + case TEST_MSG_OP_INC32: + case TEST_MSG_OP_MUL32: + msg->operand_32 = operand_32; + break; + default: + PRINT_FAIL("Invalid operand %d\n", op); + user_ring_buffer__discard(ringbuf, msg); + return -EINVAL; + } + + user_ring_buffer__submit(ringbuf, msg); + + return 0; +} + +static void kick_kernel_read_messages(void) +{ + syscall(__NR_prctl); +} + +static int handle_kernel_msg(void *ctx, void *data, size_t len) +{ + struct user_ringbuf_success *skel = ctx; + struct test_msg *msg = data; + + switch (msg->msg_op) { + case TEST_MSG_OP_INC64: + skel->bss->user_mutated += msg->operand_64; + return 0; + case TEST_MSG_OP_INC32: + skel->bss->user_mutated += msg->operand_32; + return 0; + case TEST_MSG_OP_MUL64: + skel->bss->user_mutated *= msg->operand_64; + return 0; + case TEST_MSG_OP_MUL32: + skel->bss->user_mutated *= msg->operand_32; + return 0; + default: + fprintf(stderr, "Invalid operand %d\n", msg->msg_op); + return -EINVAL; + } +} + +static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf, + struct user_ringbuf_success *skel) +{ + int cnt; + + cnt = ring_buffer__consume(kern_ringbuf); + ASSERT_EQ(cnt, 8, "consume_kern_ringbuf"); + ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err"); +} + +static void test_user_ringbuf_msg_protocol(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *user_ringbuf; + struct ring_buffer *kern_ringbuf; + int err, i; + __u64 expected_kern = 0; + + err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf); + if (!ASSERT_OK(err, "create_ringbufs")) + return; + + for (i = 0; i < 64; i++) { + enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS; + __u64 operand_64 = TEST_OP_64; + __u32 operand_32 = TEST_OP_32; + + err = send_test_message(user_ringbuf, op, operand_64, operand_32); + if (err) { + /* Only assert on a failure to avoid spamming success logs. */ + ASSERT_OK(err, "send_test_message"); + goto cleanup; + } + + switch (op) { + case TEST_MSG_OP_INC64: + expected_kern += operand_64; + break; + case TEST_MSG_OP_INC32: + expected_kern += operand_32; + break; + case TEST_MSG_OP_MUL64: + expected_kern *= operand_64; + break; + case TEST_MSG_OP_MUL32: + expected_kern *= operand_32; + break; + default: + PRINT_FAIL("Unexpected op %d\n", op); + goto cleanup; + } + + if (i % 8 == 0) { + kick_kernel_read_messages(); + ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern"); + ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err"); + drain_kernel_messages_buffer(kern_ringbuf, skel); + } + } + +cleanup: + ring_buffer__free(kern_ringbuf); + user_ring_buffer__free(user_ringbuf); + user_ringbuf_success__destroy(skel); +} + +static void *kick_kernel_cb(void *arg) +{ + /* Kick the kernel, causing it to drain the ring buffer and then wake + * up the test thread waiting on epoll. 
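handle_kernel_msg() above is the user-space half of the round trip: the BPF program drains commands from the user ring buffer, applies them to kern_mutated, and posts messages back through the ordinary BPF_MAP_TYPE_RINGBUF map that drain_kernel_messages_buffer() consumes. The reply direction uses the long-standing kernel ring buffer helpers; a rough sketch, assuming a kernel_ringbuf map as in the skeleton (not the literal test program):

struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        /* max_entries is set from user space before load, as shown above */
} kernel_ringbuf SEC(".maps");

static int publish_msg(const struct test_msg *in)
{
        struct test_msg *out;

        out = bpf_ringbuf_reserve(&kernel_ringbuf, sizeof(*out), 0);
        if (!out)
                return 1;       /* buffer full; caller records the error */

        *out = *in;             /* echo the operation back to user space */
        bpf_ringbuf_submit(out, 0);
        return 0;
}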
+ */ + syscall(__NR_getrlimit); + + return NULL; +} + +static int spawn_kick_thread_for_poll(void) +{ + pthread_t thread; + + return pthread_create(&thread, NULL, kick_kernel_cb, NULL); +} + +static void test_user_ringbuf_blocking_reserve(void) +{ + struct user_ringbuf_success *skel; + struct user_ring_buffer *ringbuf; + int err, num_written = 0; + __u64 *token; + + err = load_skel_create_user_ringbuf(&skel, &ringbuf); + if (err) + return; + + ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before"); + + while (1) { + /* Write samples until the buffer is full. */ + token = user_ring_buffer__reserve(ringbuf, sizeof(*token)); + if (!token) + break; + + *token = 0xdeadbeef; + + user_ring_buffer__submit(ringbuf, token); + num_written++; + } + + if (!ASSERT_GE(num_written, 0, "num_written")) + goto cleanup; + + /* Should not have read any samples until the kernel is kicked. */ + ASSERT_EQ(skel->bss->read, 0, "num_pre_kick"); + + /* We correctly time out after 1 second, without a sample. */ + token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000); + if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token")) + goto cleanup; + + err = spawn_kick_thread_for_poll(); + if (!ASSERT_EQ(err, 0, "deferred_kick_thread\n")) + goto cleanup; + + /* After spawning another thread that asychronously kicks the kernel to + * drain the messages, we're able to block and successfully get a + * sample once we receive an event notification. + */ + token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000); + + if (!ASSERT_OK_PTR(token, "block_token")) + goto cleanup; + + ASSERT_GT(skel->bss->read, 0, "num_post_kill"); + ASSERT_LE(skel->bss->read, num_written, "num_post_kill"); + ASSERT_EQ(skel->bss->err, 0, "err_post_poll"); + user_ring_buffer__discard(ringbuf, token); + +cleanup: + user_ring_buffer__free(ringbuf); + user_ringbuf_success__destroy(skel); +} + +static struct { + const char *prog_name; + const char *expected_err_msg; +} failure_tests[] = { + /* failure cases */ + {"user_ringbuf_callback_bad_access1", "negative offset dynptr_ptr ptr"}, + {"user_ringbuf_callback_bad_access2", "dereference of modified dynptr_ptr ptr"}, + {"user_ringbuf_callback_write_forbidden", "invalid mem access 'dynptr_ptr'"}, + {"user_ringbuf_callback_null_context_write", "invalid mem access 'scalar'"}, + {"user_ringbuf_callback_null_context_read", "invalid mem access 'scalar'"}, + {"user_ringbuf_callback_discard_dynptr", "arg 1 is an unacquired reference"}, + {"user_ringbuf_callback_submit_dynptr", "arg 1 is an unacquired reference"}, + {"user_ringbuf_callback_invalid_return", "At callback return the register R0 has value"}, +}; + +#define SUCCESS_TEST(_func) { _func, #_func } + +static struct { + void (*test_callback)(void); + const char *test_name; +} success_tests[] = { + SUCCESS_TEST(test_user_ringbuf_mappings), + SUCCESS_TEST(test_user_ringbuf_post_misaligned), + SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset), + SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz), + SUCCESS_TEST(test_user_ringbuf_basic), + SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer), + SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust), + SUCCESS_TEST(test_user_ringbuf_overfill), + SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored), + SUCCESS_TEST(test_user_ringbuf_loop), + SUCCESS_TEST(test_user_ringbuf_msg_protocol), + SUCCESS_TEST(test_user_ringbuf_blocking_reserve), +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + 
LIBBPF_OPTS(bpf_object_open_opts, opts); + struct bpf_program *prog; + struct user_ringbuf_fail *skel; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = user_ringbuf_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.user_ringbuf, getpagesize()); + + err = user_ringbuf_fail__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + user_ringbuf_fail__destroy(skel); +} + +void test_user_ringbuf(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { + if (!test__start_subtest(success_tests[i].test_name)) + continue; + + success_tests[i].test_callback(); + } + + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { + if (!test__start_subtest(failure_tests[i].prog_name)) + continue; + + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c new file mode 100644 index 000000000..4d7056f8f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <time.h> +#include "test_varlen.skel.h" + +#define CHECK_VAL(got, exp) \ + CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \ + (long)(got), (long)(exp)) + +void test_varlen(void) +{ + int duration = 0, err; + struct test_varlen* skel; + struct test_varlen__bss *bss; + struct test_varlen__data *data; + const char str1[] = "Hello, "; + const char str2[] = "World!"; + const char exp_str[] = "Hello, \0World!\0"; + const int size1 = sizeof(str1); + const int size2 = sizeof(str2); + + skel = test_varlen__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + data = skel->data; + + err = test_varlen__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + bss->test_pid = getpid(); + + /* trigger everything */ + memcpy(bss->buf_in1, str1, size1); + memcpy(bss->buf_in2, str2, size2); + bss->capture = true; + usleep(1); + bss->capture = false; + + CHECK_VAL(bss->payload1_len1, size1); + CHECK_VAL(bss->payload1_len2, size2); + CHECK_VAL(bss->total1, size1 + size2); + CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check", + "doesn't match!\n"); + + CHECK_VAL(data->payload2_len1, size1); + CHECK_VAL(data->payload2_len2, size2); + CHECK_VAL(data->total2, size1 + size2); + CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", + "doesn't match!\n"); + + CHECK_VAL(data->payload3_len1, size1); + CHECK_VAL(data->payload3_len2, size2); + CHECK_VAL(data->total3, size1 + size2); + CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", + "doesn't match!\n"); + + CHECK_VAL(data->payload4_len1, size1); + CHECK_VAL(data->payload4_len2, size2); + CHECK_VAL(data->total4, size1 + size2); + CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check", + 
"doesn't match!\n"); + + CHECK_VAL(bss->ret_bad_read, -EFAULT); + CHECK_VAL(data->payload_bad[0], 0x42); + CHECK_VAL(data->payload_bad[1], 0x42); + CHECK_VAL(data->payload_bad[2], 0); + CHECK_VAL(data->payload_bad[3], 0x42); + CHECK_VAL(data->payload_bad[4], 0x42); +cleanup: + test_varlen__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verif_stats.c b/tools/testing/selftests/bpf/prog_tests/verif_stats.c new file mode 100644 index 000000000..a47e7c0e1 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verif_stats.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <test_progs.h> + +#include "trace_vprintk.lskel.h" + +void test_verif_stats(void) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct trace_vprintk_lskel *skel; + struct bpf_prog_info info = {}; + int err; + + skel = trace_vprintk_lskel__open_and_load(); + if (!ASSERT_OK_PTR(skel, "trace_vprintk__open_and_load")) + goto cleanup; + + err = bpf_obj_get_info_by_fd(skel->progs.sys_enter.prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto cleanup; + + if (!ASSERT_GT(info.verified_insns, 0, "verified_insns")) + goto cleanup; + +cleanup: + trace_vprintk_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c new file mode 100644 index 000000000..579d6ee83 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH + * + * Author: Roberto Sassu <roberto.sassu@huawei.com> + */ + +#include <stdio.h> +#include <errno.h> +#include <stdlib.h> +#include <unistd.h> +#include <endian.h> +#include <limits.h> +#include <sys/stat.h> +#include <sys/wait.h> +#include <sys/mman.h> +#include <linux/keyctl.h> +#include <test_progs.h> + +#include "test_verify_pkcs7_sig.skel.h" + +#define MAX_DATA_SIZE (1024 * 1024) +#define MAX_SIG_SIZE 1024 + +#define VERIFY_USE_SECONDARY_KEYRING (1UL) +#define VERIFY_USE_PLATFORM_KEYRING (2UL) + +/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */ +#define MODULE_SIG_STRING "~Module signature appended~\n" + +/* + * Module signature information block. 
+ * + * The constituents of the signature section are, in order: + * + * - Signer's name + * - Key identifier + * - Signature data + * - Information block + */ +struct module_signature { + __u8 algo; /* Public-key crypto algorithm [0] */ + __u8 hash; /* Digest algorithm [0] */ + __u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */ + __u8 signer_len; /* Length of signer's name [0] */ + __u8 key_id_len; /* Length of key identifier [0] */ + __u8 __pad[3]; + __be32 sig_len; /* Length of signature data */ +}; + +struct data { + __u8 data[MAX_DATA_SIZE]; + __u32 data_len; + __u8 sig[MAX_SIG_SIZE]; + __u32 sig_len; +}; + +static bool kfunc_not_supported; + +static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, + va_list args) +{ + if (strcmp(fmt, "libbpf: extern (func ksym) '%s': not found in kernel or module BTFs\n")) + return 0; + + if (strcmp(va_arg(args, char *), "bpf_verify_pkcs7_signature")) + return 0; + + kfunc_not_supported = true; + return 0; +} + +static int _run_setup_process(const char *setup_dir, const char *cmd) +{ + int child_pid, child_status; + + child_pid = fork(); + if (child_pid == 0) { + execlp("./verify_sig_setup.sh", "./verify_sig_setup.sh", cmd, + setup_dir, NULL); + exit(errno); + + } else if (child_pid > 0) { + waitpid(child_pid, &child_status, 0); + return WEXITSTATUS(child_status); + } + + return -EINVAL; +} + +static int populate_data_item_str(const char *tmp_dir, struct data *data_item) +{ + struct stat st; + char data_template[] = "/tmp/dataXXXXXX"; + char path[PATH_MAX]; + int ret, fd, child_status, child_pid; + + data_item->data_len = 4; + memcpy(data_item->data, "test", data_item->data_len); + + fd = mkstemp(data_template); + if (fd == -1) + return -errno; + + ret = write(fd, data_item->data, data_item->data_len); + + close(fd); + + if (ret != data_item->data_len) { + ret = -EIO; + goto out; + } + + child_pid = fork(); + + if (child_pid == -1) { + ret = -errno; + goto out; + } + + if (child_pid == 0) { + snprintf(path, sizeof(path), "%s/signing_key.pem", tmp_dir); + + return execlp("./sign-file", "./sign-file", "-d", "sha256", + path, path, data_template, NULL); + } + + waitpid(child_pid, &child_status, 0); + + ret = WEXITSTATUS(child_status); + if (ret) + goto out; + + snprintf(path, sizeof(path), "%s.p7s", data_template); + + ret = stat(path, &st); + if (ret == -1) { + ret = -errno; + goto out; + } + + if (st.st_size > sizeof(data_item->sig)) { + ret = -EINVAL; + goto out_sig; + } + + data_item->sig_len = st.st_size; + + fd = open(path, O_RDONLY); + if (fd == -1) { + ret = -errno; + goto out_sig; + } + + ret = read(fd, data_item->sig, data_item->sig_len); + + close(fd); + + if (ret != data_item->sig_len) { + ret = -EIO; + goto out_sig; + } + + ret = 0; +out_sig: + unlink(path); +out: + unlink(data_template); + return ret; +} + +static int populate_data_item_mod(struct data *data_item) +{ + char mod_path[PATH_MAX], *mod_path_ptr; + struct stat st; + void *mod; + FILE *fp; + struct module_signature ms; + int ret, fd, modlen, marker_len, sig_len; + + data_item->data_len = 0; + + if (stat("/lib/modules", &st) == -1) + return 0; + + /* Requires CONFIG_TCP_CONG_BIC=m. 
*/ + fp = popen("find /lib/modules/$(uname -r) -name tcp_bic.ko", "r"); + if (!fp) + return 0; + + mod_path_ptr = fgets(mod_path, sizeof(mod_path), fp); + pclose(fp); + + if (!mod_path_ptr) + return 0; + + mod_path_ptr = strchr(mod_path, '\n'); + if (!mod_path_ptr) + return 0; + + *mod_path_ptr = '\0'; + + if (stat(mod_path, &st) == -1) + return 0; + + modlen = st.st_size; + marker_len = sizeof(MODULE_SIG_STRING) - 1; + + fd = open(mod_path, O_RDONLY); + if (fd == -1) + return -errno; + + mod = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + + close(fd); + + if (mod == MAP_FAILED) + return -errno; + + if (strncmp(mod + modlen - marker_len, MODULE_SIG_STRING, marker_len)) { + ret = -EINVAL; + goto out; + } + + modlen -= marker_len; + + memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); + + sig_len = __be32_to_cpu(ms.sig_len); + modlen -= sig_len + sizeof(ms); + + if (modlen > sizeof(data_item->data)) { + ret = -E2BIG; + goto out; + } + + memcpy(data_item->data, mod, modlen); + data_item->data_len = modlen; + + if (sig_len > sizeof(data_item->sig)) { + ret = -E2BIG; + goto out; + } + + memcpy(data_item->sig, mod + modlen, sig_len); + data_item->sig_len = sig_len; + ret = 0; +out: + munmap(mod, st.st_size); + return ret; +} + +void test_verify_pkcs7_sig(void) +{ + libbpf_print_fn_t old_print_cb; + char tmp_dir_template[] = "/tmp/verify_sigXXXXXX"; + char *tmp_dir; + struct test_verify_pkcs7_sig *skel = NULL; + struct bpf_map *map; + struct data data; + int ret, zero = 0; + + /* Trigger creation of session keyring. */ + syscall(__NR_request_key, "keyring", "_uid.0", NULL, + KEY_SPEC_SESSION_KEYRING); + + tmp_dir = mkdtemp(tmp_dir_template); + if (!ASSERT_OK_PTR(tmp_dir, "mkdtemp")) + return; + + ret = _run_setup_process(tmp_dir, "setup"); + if (!ASSERT_OK(ret, "_run_setup_process")) + goto close_prog; + + skel = test_verify_pkcs7_sig__open(); + if (!ASSERT_OK_PTR(skel, "test_verify_pkcs7_sig__open")) + goto close_prog; + + old_print_cb = libbpf_set_print(libbpf_print_cb); + ret = test_verify_pkcs7_sig__load(skel); + libbpf_set_print(old_print_cb); + + if (ret < 0 && kfunc_not_supported) { + printf( + "%s:SKIP:bpf_verify_pkcs7_signature() kfunc not supported\n", + __func__); + test__skip(); + goto close_prog; + } + + if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__load")) + goto close_prog; + + ret = test_verify_pkcs7_sig__attach(skel); + if (!ASSERT_OK(ret, "test_verify_pkcs7_sig__attach")) + goto close_prog; + + map = bpf_object__find_map_by_name(skel->obj, "data_input"); + if (!ASSERT_OK_PTR(map, "data_input not found")) + goto close_prog; + + skel->bss->monitored_pid = getpid(); + + /* Test without data and signature. */ + skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + /* Test successful signature verification with session keyring. */ + ret = populate_data_item_str(tmp_dir, &data); + if (!ASSERT_OK(ret, "populate_data_item_str")) + goto close_prog; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + /* Test successful signature verification with testing keyring. 
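The bpf_map_update_elem() calls in this test are only the delivery mechanism: the BPF program is attached on that update path (which is why monitored_pid is set first), builds dynptrs over the data and signature, resolves the requested keyring and calls the kfunc under test. The core of the BPF side looks roughly like the sketch below, where data_val stands for the struct data element read from the map (illustrative, not the literal test_verify_pkcs7_sig.c):

struct bpf_key *trusted_keyring;
struct bpf_dynptr data_ptr, sig_ptr;
int ret;

bpf_dynptr_from_mem(data_val->data, data_val->data_len, 0, &data_ptr);
bpf_dynptr_from_mem(data_val->sig, data_val->sig_len, 0, &sig_ptr);

if (user_keyring_serial)
        trusted_keyring = bpf_lookup_user_key(user_keyring_serial, 0);
else
        trusted_keyring = bpf_lookup_system_key(system_keyring_id);
if (!trusted_keyring)
        return 0;

ret = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, trusted_keyring);
bpf_key_put(trusted_keyring);

Verification failures surface to user space as a failing bpf_map_update_elem(), which is what the ASSERT_LT() checks above and below rely on.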
*/ + skel->bss->user_keyring_serial = syscall(__NR_request_key, "keyring", + "ebpf_testing_keyring", NULL, + KEY_SPEC_SESSION_KEYRING); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + /* + * Ensure key_task_permission() is called and rejects the keyring + * (no Search permission). + */ + syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial, + 0x37373737); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + syscall(__NR_keyctl, KEYCTL_SETPERM, skel->bss->user_keyring_serial, + 0x3f3f3f3f); + + /* + * Ensure key_validate() is called and rejects the keyring (key expired) + */ + syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, + skel->bss->user_keyring_serial, 1); + sleep(1); + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->user_keyring_serial = KEY_SPEC_SESSION_KEYRING; + + /* Test with corrupted data (signature verification should fail). */ + data.data[0] = 'a'; + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, BPF_ANY); + if (!ASSERT_LT(ret, 0, "bpf_map_update_elem data_input")) + goto close_prog; + + ret = populate_data_item_mod(&data); + if (!ASSERT_OK(ret, "populate_data_item_mod")) + goto close_prog; + + /* Test signature verification with system keyrings. */ + if (data.data_len) { + skel->bss->user_keyring_serial = 0; + skel->bss->system_keyring_id = 0; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->system_keyring_id = VERIFY_USE_SECONDARY_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + if (!ASSERT_OK(ret, "bpf_map_update_elem data_input")) + goto close_prog; + + skel->bss->system_keyring_id = VERIFY_USE_PLATFORM_KEYRING; + + ret = bpf_map_update_elem(bpf_map__fd(map), &zero, &data, + BPF_ANY); + ASSERT_LT(ret, 0, "bpf_map_update_elem data_input"); + } + +close_prog: + _run_setup_process(tmp_dir, "cleanup"); + + if (!skel) + return; + + skel->bss->monitored_pid = 0; + test_verify_pkcs7_sig__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c new file mode 100644 index 000000000..72310cfc6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include <test_progs.h> +#include <time.h> +#include "test_vmlinux.skel.h" + +#define MY_TV_NSEC 1337 + +static void nsleep() +{ + struct timespec ts = { .tv_nsec = MY_TV_NSEC }; + + (void)syscall(__NR_nanosleep, &ts, NULL); +} + +void test_vmlinux(void) +{ + int duration = 0, err; + struct test_vmlinux* skel; + struct test_vmlinux__bss *bss; + + skel = test_vmlinux__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + + err = test_vmlinux__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger everything */ + nsleep(); + + CHECK(!bss->tp_called, "tp", "not called\n"); + CHECK(!bss->raw_tp_called, "raw_tp", "not called\n"); + CHECK(!bss->tp_btf_called, "tp_btf", "not called\n"); + CHECK(!bss->kprobe_called, "kprobe", "not called\n"); + CHECK(!bss->fentry_called, 
"fentry", "not called\n"); + +cleanup: + test_vmlinux__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c new file mode 100644 index 000000000..947863a1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +void test_xdp(void) +{ + struct vip key4 = {.protocol = 6, .family = AF_INET}; + struct vip key6 = {.protocol = 6, .family = AF_INET6}; + struct iptnl_info value4 = {.family = AF_INET}; + struct iptnl_info value6 = {.family = AF_INET6}; + const char *file = "./test_xdp.bpf.o"; + struct bpf_object *obj; + char buf[128]; + struct ipv6hdr iph6; + struct iphdr iph; + int err, prog_fd, map_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + map_fd = bpf_find_map(__func__, obj, "vip2tnl"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key4, &value4, 0); + bpf_map_update_elem(map_fd, &key6, &value6, 0); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval"); + ASSERT_EQ(topts.data_size_out, 74, "ipv4 test_run data_size_out"); + ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 test_run iph.protocol"); + + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6)); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval"); + ASSERT_EQ(topts.data_size_out, 114, "ipv6 test_run data_size_out"); + ASSERT_EQ(iph6.nexthdr, IPPROTO_IPV6, "ipv6 test_run iph6.nexthdr"); +out: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c new file mode 100644 index 000000000..fce203640 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_xdp_update_frags(void) +{ + const char *file = "./test_xdp_update_frags.bpf.o"; + int err, prog_fd, max_skb_frags, buf_size, num; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 *offset; + __u8 *buf; + FILE *f; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(128); + if (!ASSERT_OK_PTR(buf, "alloc buf 128b")) + goto out; + + memset(buf, 0, 128); + offset = (__u32 *)buf; + *offset = 16; + buf[*offset] = 0xaa; /* marker at offset 16 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 31 (head) */ + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 128; + topts.data_size_out = 128; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]"); + 
ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]"); + + free(buf); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 5000; + buf[*offset] = 0xaa; /* marker at offset 5000 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 5015 (frag0) */ + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 9000; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]"); + ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 3510; + buf[*offset] = 0xaa; /* marker at offset 3510 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 3525 (frag0) */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]"); + ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 7606; + buf[*offset] = 0xaa; /* marker at offset 7606 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 7621 (frag1) */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]"); + ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]"); + + free(buf); + + /* test_xdp_update_frags: unsupported buffer size */ + f = fopen("/proc/sys/net/core/max_skb_frags", "r"); + if (!ASSERT_OK_PTR(f, "max_skb_frag file pointer")) + goto out; + + num = fscanf(f, "%d", &max_skb_frags); + fclose(f); + + if (!ASSERT_EQ(num, 1, "max_skb_frags read failed")) + goto out; + + /* xdp_buff linear area size is always set to 4096 in the + * bpf_prog_test_run_xdp routine. 
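To put a number on the oversized buffer built just below: with the common default of max_skb_frags = 17 and 4 KiB pages, buf_size works out to 4096 + (17 + 1) * 4096 = 77824 bytes, i.e. one page-sized fragment more than a multi-buffer xdp_buff is allowed to carry, so bpf_prog_test_run_opts() is expected to reject it with -ENOMEM, as asserted at the end of this test.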
+ */ + buf_size = 4096 + (max_skb_frags + 1) * sysconf(_SC_PAGE_SIZE); + buf = malloc(buf_size); + if (!ASSERT_OK_PTR(buf, "alloc buf")) + goto out; + + memset(buf, 0, buf_size); + offset = (__u32 *)buf; + *offset = 16; + buf[*offset] = 0xaa; + buf[*offset + 15] = 0xaa; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = buf_size; + topts.data_size_out = buf_size; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_EQ(err, -ENOMEM, + "unsupported buf size, possible non-default /proc/sys/net/core/max_skb_flags?"); + free(buf); +out: + bpf_object__close(obj); +} + +void test_xdp_adjust_frags(void) +{ + if (test__start_subtest("xdp_adjust_frags")) + test_xdp_update_frags(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c new file mode 100644 index 000000000..39973ea1c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_xdp_adjust_tail_shrink(void) +{ + const char *file = "./test_xdp_adjust_tail_shrink.bpf.o"; + __u32 expect_sz; + struct bpf_object *obj; + int err, prog_fd; + char buf[128]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink")) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv4"); + ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval"); + + expect_sz = sizeof(pkt_v6) - 20; /* Test shrink with 20 bytes */ + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); + ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size"); + + bpf_object__close(obj); +} + +static void test_xdp_adjust_tail_grow(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; + struct bpf_object *obj; + char buf[4096]; /* avoid segfault: large buf to hold grow results */ + __u32 expect_sz; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv4"); + ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval"); + + expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */ + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); + ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size"); + + bpf_object__close(obj); +} + +static void test_xdp_adjust_tail_grow2(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; + char buf[4096]; /* avoid segfault: large buf to hold grow results */ + int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/; + struct bpf_object *obj; + int err, cnt, i; + int max_grow, prog_fd; + + LIBBPF_OPTS(bpf_test_run_opts, tattr, + .repeat = 1, + .data_in = &buf, + 
.data_out = &buf, + .data_size_in = 0, /* Per test */ + .data_size_out = 0, /* Per test */ + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) + return; + + /* Test case-64 */ + memset(buf, 1, sizeof(buf)); + tattr.data_size_in = 64; /* Determine test case via pkt size */ + tattr.data_size_out = 128; /* Limit copy_size */ + /* Kernel side alloc packet memory area that is zero init */ + err = bpf_prog_test_run_opts(prog_fd, &tattr); + + ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */ + ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval"); + ASSERT_EQ(tattr.data_size_out, 192, "case-64 data_size_out"); /* Expected grow size */ + + /* Extra checks for data contents */ + ASSERT_EQ(buf[0], 1, "case-64-data buf[0]"); /* 0-63 memset to 1 */ + ASSERT_EQ(buf[63], 1, "case-64-data buf[63]"); + ASSERT_EQ(buf[64], 0, "case-64-data buf[64]"); /* 64-127 memset to 0 */ + ASSERT_EQ(buf[127], 0, "case-64-data buf[127]"); + ASSERT_EQ(buf[128], 1, "case-64-data buf[128]"); /* 128-191 memset to 1 */ + ASSERT_EQ(buf[191], 1, "case-64-data buf[191]"); + + /* Test case-128 */ + memset(buf, 2, sizeof(buf)); + tattr.data_size_in = 128; /* Determine test case via pkt size */ + tattr.data_size_out = sizeof(buf); /* Copy everything */ + err = bpf_prog_test_run_opts(prog_fd, &tattr); + + max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */ + ASSERT_OK(err, "case-128"); + ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval"); + ASSERT_EQ(tattr.data_size_out, max_grow, "case-128 data_size_out"); /* Expect max grow */ + + /* Extra checks for data content: Count grow size, will contain zeros */ + for (i = 0, cnt = 0; i < sizeof(buf); i++) { + if (buf[i] == 0) + cnt++; + } + ASSERT_EQ(cnt, max_grow - tattr.data_size_in, "case-128-data cnt"); /* Grow increase */ + ASSERT_EQ(tattr.data_size_out, max_grow, "case-128-data data_size_out"); /* Total grow */ + + bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_shrink(void) +{ + const char *file = "./test_xdp_adjust_tail_shrink.bpf.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* For the individual test cases, the first byte in the packet + * indicates which test will be run. 
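+	 * A leading 0 removes 10 bytes from the last fragment, 1 removes one
+	 * whole 4KiB page, and 2 removes two pages so that only a linear
+	 * xdp_buff remains; the expected sizes below follow this encoding.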
+ */ + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + + /* Test case removing 10 bytes from last frag, NOT freeing it */ + exp_size = 8990; /* 9000 - 10 */ + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 9000; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-10b"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-10b size"); + + /* Test case removing one of two pages, assuming 4K pages */ + buf[0] = 1; + exp_size = 4900; /* 9000 - 4100 */ + + topts.data_size_out = 9000; /* reset from previous invocation */ + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-4Kb"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-4Kb size"); + + /* Test case removing two pages resulting in a linear xdp_buff */ + buf[0] = 2; + exp_size = 800; /* 9000 - 8200 */ + topts.data_size_out = 9000; /* reset from previous invocation */ + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-9Kb"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-9Kb size"); + + free(buf); +out: + bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_grow(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.bpf.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, i, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(16384); + if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb")) + goto out; + + /* Test case add 10 bytes to last frag */ + memset(buf, 1, 16384); + exp_size = 9000 + 10; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 16384; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); + + for (i = 0; i < 9000; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + + for (i = 9000; i < 9010; i++) + ASSERT_EQ(buf[i], 0, "9Kb+10b-new"); + + for (i = 9010; i < 16384; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + + /* Test a too large grow */ + memset(buf, 1, 16384); + exp_size = 9001; + + topts.data_in = topts.data_out = buf; + topts.data_size_in = 9001; + topts.data_size_out = 16384; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(topts.retval, XDP_DROP, "9Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); + + free(buf); +out: + bpf_object__close(obj); +} + +void test_xdp_adjust_tail(void) +{ + if (test__start_subtest("xdp_adjust_tail_shrink")) + test_xdp_adjust_tail_shrink(); + if (test__start_subtest("xdp_adjust_tail_grow")) + test_xdp_adjust_tail_grow(); + if (test__start_subtest("xdp_adjust_tail_grow2")) + test_xdp_adjust_tail_grow2(); + if (test__start_subtest("xdp_adjust_frags_tail_shrink")) + test_xdp_adjust_frags_tail_shrink(); + if (test__start_subtest("xdp_adjust_frags_tail_grow")) + 
test_xdp_adjust_frags_tail_grow(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c new file mode 100644 index 000000000..062fbc8c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +#define IFINDEX_LO 1 +#define XDP_FLAGS_REPLACE (1U << 4) + +void serial_test_xdp_attach(void) +{ + __u32 duration = 0, id1, id2, id0 = 0, len; + struct bpf_object *obj1, *obj2, *obj3; + const char *file = "./test_xdp.bpf.o"; + struct bpf_prog_info info = {}; + int err, fd1, fd2, fd3; + LIBBPF_OPTS(bpf_xdp_attach_opts, opts); + + len = sizeof(info); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1); + if (CHECK_FAIL(err)) + return; + err = bpf_obj_get_info_by_fd(fd1, &info, &len); + if (CHECK_FAIL(err)) + goto out_1; + id1 = info.id; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2); + if (CHECK_FAIL(err)) + goto out_1; + + memset(&info, 0, sizeof(info)); + err = bpf_obj_get_info_by_fd(fd2, &info, &len); + if (CHECK_FAIL(err)) + goto out_2; + id2 = info.id; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3); + if (CHECK_FAIL(err)) + goto out_2; + + err = bpf_xdp_attach(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, &opts); + if (CHECK(err, "load_ok", "initial load failed")) + goto out_close; + + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (CHECK(err || id0 != id1, "id1_check", + "loaded prog id %u != id1 %u, err %d", id0, id1, err)) + goto out_close; + + err = bpf_xdp_attach(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, &opts); + if (CHECK(!err, "load_fail", "load with expected id didn't fail")) + goto out; + + opts.old_prog_fd = fd1; + err = bpf_xdp_attach(IFINDEX_LO, fd2, 0, &opts); + if (CHECK(err, "replace_ok", "replace valid old_fd failed")) + goto out; + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (CHECK(err || id0 != id2, "id2_check", + "loaded prog id %u != id2 %u, err %d", id0, id2, err)) + goto out_close; + + err = bpf_xdp_attach(IFINDEX_LO, fd3, 0, &opts); + if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail")) + goto out; + + err = bpf_xdp_detach(IFINDEX_LO, 0, &opts); + if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail")) + goto out; + + opts.old_prog_fd = fd2; + err = bpf_xdp_detach(IFINDEX_LO, 0, &opts); + if (CHECK(err, "remove_ok", "remove valid old_fd failed")) + goto out; + + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (CHECK(err || id0 != 0, "unload_check", + "loaded prog id %u != 0, err %d", id0, err)) + goto out_close; +out: + bpf_xdp_detach(IFINDEX_LO, 0, NULL); +out_close: + bpf_object__close(obj3); +out_2: + bpf_object__close(obj2); +out_1: + bpf_object__close(obj1); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c new file mode 100644 index 000000000..5e3a26b15 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 + +/** + * Test XDP bonding support + * + * Sets up two bonded veth pairs between two fresh namespaces + * and verifies that XDP_TX program loaded on a bond device + * are correctly loaded onto the slave devices and XDP_TX'd + * packets are balanced using bonding. 
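+ *
+ * Packet distribution across the slaves is verified by reading the
+ * per-interface RX counters from /proc/net/dev after sending a fixed
+ * number of UDP packets through the bond.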
+ */ + +#define _GNU_SOURCE +#include <sched.h> +#include <net/if.h> +#include <linux/if_link.h> +#include "test_progs.h" +#include "network_helpers.h" +#include <linux/if_bonding.h> +#include <linux/limits.h> +#include <linux/udp.h> + +#include "xdp_dummy.skel.h" +#include "xdp_redirect_multi_kern.skel.h" +#include "xdp_tx.skel.h" + +#define BOND1_MAC {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} +#define BOND1_MAC_STR "00:11:22:33:44:55" +#define BOND2_MAC {0x00, 0x22, 0x33, 0x44, 0x55, 0x66} +#define BOND2_MAC_STR "00:22:33:44:55:66" +#define NPACKETS 100 + +static int root_netns_fd = -1; + +static void restore_root_netns(void) +{ + ASSERT_OK(setns(root_netns_fd, CLONE_NEWNET), "restore_root_netns"); +} + +static int setns_by_name(char *name) +{ + int nsfd, err; + char nspath[PATH_MAX]; + + snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); + nsfd = open(nspath, O_RDONLY | O_CLOEXEC); + if (nsfd < 0) + return -1; + + err = setns(nsfd, CLONE_NEWNET); + close(nsfd); + return err; +} + +static int get_rx_packets(const char *iface) +{ + FILE *f; + char line[512]; + int iface_len = strlen(iface); + + f = fopen("/proc/net/dev", "r"); + if (!f) + return -1; + + while (fgets(line, sizeof(line), f)) { + char *p = line; + + while (*p == ' ') + p++; /* skip whitespace */ + if (!strncmp(p, iface, iface_len)) { + p += iface_len; + if (*p++ != ':') + continue; + while (*p == ' ') + p++; /* skip whitespace */ + while (*p && *p != ' ') + p++; /* skip rx bytes */ + while (*p == ' ') + p++; /* skip whitespace */ + fclose(f); + return atoi(p); + } + } + fclose(f); + return -1; +} + +#define MAX_BPF_LINKS 8 + +struct skeletons { + struct xdp_dummy *xdp_dummy; + struct xdp_tx *xdp_tx; + struct xdp_redirect_multi_kern *xdp_redirect_multi_kern; + + int nlinks; + struct bpf_link *links[MAX_BPF_LINKS]; +}; + +static int xdp_attach(struct skeletons *skeletons, struct bpf_program *prog, char *iface) +{ + struct bpf_link *link; + int ifindex; + + ifindex = if_nametoindex(iface); + if (!ASSERT_GT(ifindex, 0, "get ifindex")) + return -1; + + if (!ASSERT_LE(skeletons->nlinks+1, MAX_BPF_LINKS, "too many XDP programs attached")) + return -1; + + link = bpf_program__attach_xdp(prog, ifindex); + if (!ASSERT_OK_PTR(link, "attach xdp program")) + return -1; + + skeletons->links[skeletons->nlinks++] = link; + return 0; +} + +enum { + BOND_ONE_NO_ATTACH = 0, + BOND_BOTH_AND_ATTACH, +}; + +static const char * const mode_names[] = { + [BOND_MODE_ROUNDROBIN] = "balance-rr", + [BOND_MODE_ACTIVEBACKUP] = "active-backup", + [BOND_MODE_XOR] = "balance-xor", + [BOND_MODE_BROADCAST] = "broadcast", + [BOND_MODE_8023AD] = "802.3ad", + [BOND_MODE_TLB] = "balance-tlb", + [BOND_MODE_ALB] = "balance-alb", +}; + +static const char * const xmit_policy_names[] = { + [BOND_XMIT_POLICY_LAYER2] = "layer2", + [BOND_XMIT_POLICY_LAYER34] = "layer3+4", + [BOND_XMIT_POLICY_LAYER23] = "layer2+3", + [BOND_XMIT_POLICY_ENCAP23] = "encap2+3", + [BOND_XMIT_POLICY_ENCAP34] = "encap3+4", +}; + +static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy, + int bond_both_attach) +{ +#define SYS(fmt, ...) 
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + return -1; \ + }) + + SYS("ip netns add ns_dst"); + SYS("ip link add veth1_1 type veth peer name veth2_1 netns ns_dst"); + SYS("ip link add veth1_2 type veth peer name veth2_2 netns ns_dst"); + + SYS("ip link add bond1 type bond mode %s xmit_hash_policy %s", + mode_names[mode], xmit_policy_names[xmit_policy]); + SYS("ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none"); + SYS("ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s", + mode_names[mode], xmit_policy_names[xmit_policy]); + SYS("ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none"); + + SYS("ip link set veth1_1 master bond1"); + if (bond_both_attach == BOND_BOTH_AND_ATTACH) { + SYS("ip link set veth1_2 master bond1"); + } else { + SYS("ip link set veth1_2 up addrgenmode none"); + + if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2")) + return -1; + } + + SYS("ip -netns ns_dst link set veth2_1 master bond2"); + + if (bond_both_attach == BOND_BOTH_AND_ATTACH) + SYS("ip -netns ns_dst link set veth2_2 master bond2"); + else + SYS("ip -netns ns_dst link set veth2_2 up addrgenmode none"); + + /* Load a dummy program on sending side as with veth peer needs to have a + * XDP program loaded as well. + */ + if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "bond1")) + return -1; + + if (bond_both_attach == BOND_BOTH_AND_ATTACH) { + if (!ASSERT_OK(setns_by_name("ns_dst"), "set netns to ns_dst")) + return -1; + + if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond2")) + return -1; + + restore_root_netns(); + } + + return 0; + +#undef SYS +} + +static void bonding_cleanup(struct skeletons *skeletons) +{ + restore_root_netns(); + while (skeletons->nlinks) { + skeletons->nlinks--; + bpf_link__destroy(skeletons->links[skeletons->nlinks]); + } + ASSERT_OK(system("ip link delete bond1"), "delete bond1"); + ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1"); + ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2"); + ASSERT_OK(system("ip netns delete ns_dst"), "delete ns_dst"); +} + +static int send_udp_packets(int vary_dst_ip) +{ + struct ethhdr eh = { + .h_source = BOND1_MAC, + .h_dest = BOND2_MAC, + .h_proto = htons(ETH_P_IP), + }; + struct iphdr iph = {}; + struct udphdr uh = {}; + uint8_t buf[128]; + int i, s = -1; + int ifindex; + + s = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); + if (!ASSERT_GE(s, 0, "socket")) + goto err; + + ifindex = if_nametoindex("bond1"); + if (!ASSERT_GT(ifindex, 0, "get bond1 ifindex")) + goto err; + + iph.ihl = 5; + iph.version = 4; + iph.tos = 16; + iph.id = 1; + iph.ttl = 64; + iph.protocol = IPPROTO_UDP; + iph.saddr = 1; + iph.daddr = 2; + iph.tot_len = htons(sizeof(buf) - ETH_HLEN); + iph.check = 0; + + for (i = 1; i <= NPACKETS; i++) { + int n; + struct sockaddr_ll saddr_ll = { + .sll_ifindex = ifindex, + .sll_halen = ETH_ALEN, + .sll_addr = BOND2_MAC, + }; + + /* vary the UDP destination port for even distribution with roundrobin/xor modes */ + uh.dest++; + + if (vary_dst_ip) + iph.daddr++; + + /* construct a packet */ + memcpy(buf, &eh, sizeof(eh)); + memcpy(buf + sizeof(eh), &iph, sizeof(iph)); + memcpy(buf + sizeof(eh) + sizeof(iph), &uh, sizeof(uh)); + + n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&saddr_ll, sizeof(saddr_ll)); + if (!ASSERT_EQ(n, sizeof(buf), "sendto")) + goto err; + } + + return 0; + +err: + if (s >= 0) + close(s); + return 
-1; +} + +static void test_xdp_bonding_with_mode(struct skeletons *skeletons, int mode, int xmit_policy) +{ + int bond1_rx; + + if (bonding_setup(skeletons, mode, xmit_policy, BOND_BOTH_AND_ATTACH)) + goto out; + + if (send_udp_packets(xmit_policy != BOND_XMIT_POLICY_LAYER34)) + goto out; + + bond1_rx = get_rx_packets("bond1"); + ASSERT_EQ(bond1_rx, NPACKETS, "expected more received packets"); + + switch (mode) { + case BOND_MODE_ROUNDROBIN: + case BOND_MODE_XOR: { + int veth1_rx = get_rx_packets("veth1_1"); + int veth2_rx = get_rx_packets("veth1_2"); + int diff = abs(veth1_rx - veth2_rx); + + ASSERT_GE(veth1_rx + veth2_rx, NPACKETS, "expected more packets"); + + switch (xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + ASSERT_GE(diff, NPACKETS, + "expected packets on only one of the interfaces"); + break; + case BOND_XMIT_POLICY_LAYER23: + case BOND_XMIT_POLICY_LAYER34: + ASSERT_LT(diff, NPACKETS/2, + "expected even distribution of packets"); + break; + default: + PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy); + break; + } + break; + } + case BOND_MODE_ACTIVEBACKUP: { + int veth1_rx = get_rx_packets("veth1_1"); + int veth2_rx = get_rx_packets("veth1_2"); + int diff = abs(veth1_rx - veth2_rx); + + ASSERT_GE(diff, NPACKETS, + "expected packets on only one of the interfaces"); + break; + } + default: + PRINT_FAIL("Unimplemented xmit_policy=%d\n", xmit_policy); + break; + } + +out: + bonding_cleanup(skeletons); +} + +/* Test the broadcast redirection using xdp_redirect_map_multi_prog and adding + * all the interfaces to it and checking that broadcasting won't send the packet + * to neither the ingress bond device (bond2) or its slave (veth2_1). + */ +static void test_xdp_bonding_redirect_multi(struct skeletons *skeletons) +{ + static const char * const ifaces[] = {"bond2", "veth2_1", "veth2_2"}; + int veth1_1_rx, veth1_2_rx; + int err; + + if (bonding_setup(skeletons, BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, + BOND_ONE_NO_ATTACH)) + goto out; + + + if (!ASSERT_OK(setns_by_name("ns_dst"), "could not set netns to ns_dst")) + goto out; + + /* populate the devmap with the relevant interfaces */ + for (int i = 0; i < ARRAY_SIZE(ifaces); i++) { + int ifindex = if_nametoindex(ifaces[i]); + int map_fd = bpf_map__fd(skeletons->xdp_redirect_multi_kern->maps.map_all); + + if (!ASSERT_GT(ifindex, 0, "could not get interface index")) + goto out; + + err = bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0); + if (!ASSERT_OK(err, "add interface to map_all")) + goto out; + } + + if (xdp_attach(skeletons, + skeletons->xdp_redirect_multi_kern->progs.xdp_redirect_map_multi_prog, + "bond2")) + goto out; + + restore_root_netns(); + + if (send_udp_packets(BOND_MODE_ROUNDROBIN)) + goto out; + + veth1_1_rx = get_rx_packets("veth1_1"); + veth1_2_rx = get_rx_packets("veth1_2"); + + ASSERT_EQ(veth1_1_rx, 0, "expected no packets on veth1_1"); + ASSERT_GE(veth1_2_rx, NPACKETS, "expected packets on veth1_2"); + +out: + restore_root_netns(); + bonding_cleanup(skeletons); +} + +/* Test that XDP programs cannot be attached to both the bond master and slaves simultaneously */ +static void test_xdp_bonding_attach(struct skeletons *skeletons) +{ + struct bpf_link *link = NULL; + struct bpf_link *link2 = NULL; + int veth, bond, err; + + if (!ASSERT_OK(system("ip link add veth type veth"), "add veth")) + goto out; + if (!ASSERT_OK(system("ip link add bond type bond"), "add bond")) + goto out; + + veth = if_nametoindex("veth"); + if (!ASSERT_GE(veth, 0, "if_nametoindex veth")) + goto out; + bond = 
if_nametoindex("bond"); + if (!ASSERT_GE(bond, 0, "if_nametoindex bond")) + goto out; + + /* enslaving with a XDP program loaded is allowed */ + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth); + if (!ASSERT_OK_PTR(link, "attach program to veth")) + goto out; + + err = system("ip link set veth master bond"); + if (!ASSERT_OK(err, "set veth master")) + goto out; + + bpf_link__destroy(link); + link = NULL; + + /* attaching to slave when master has no program is allowed */ + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth); + if (!ASSERT_OK_PTR(link, "attach program to slave when enslaved")) + goto out; + + /* attaching to master not allowed when slave has program loaded */ + link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); + if (!ASSERT_ERR_PTR(link2, "attach program to master when slave has program")) + goto out; + + bpf_link__destroy(link); + link = NULL; + + /* attaching XDP program to master allowed when slave has no program */ + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); + if (!ASSERT_OK_PTR(link, "attach program to master")) + goto out; + + /* attaching to slave not allowed when master has program loaded */ + link2 = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, veth); + if (!ASSERT_ERR_PTR(link2, "attach program to slave when master has program")) + goto out; + + bpf_link__destroy(link); + link = NULL; + + /* test program unwinding with a non-XDP slave */ + if (!ASSERT_OK(system("ip link add vxlan type vxlan id 1 remote 1.2.3.4 dstport 0 dev lo"), + "add vxlan")) + goto out; + + err = system("ip link set vxlan master bond"); + if (!ASSERT_OK(err, "set vxlan master")) + goto out; + + /* attaching not allowed when one slave does not support XDP */ + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); + if (!ASSERT_ERR_PTR(link, "attach program to master when slave does not support XDP")) + goto out; + +out: + bpf_link__destroy(link); + bpf_link__destroy(link2); + + system("ip link del veth"); + system("ip link del bond"); + system("ip link del vxlan"); +} + +/* Test with nested bonding devices to catch issue with negative jump label count */ +static void test_xdp_bonding_nested(struct skeletons *skeletons) +{ + struct bpf_link *link = NULL; + int bond, err; + + if (!ASSERT_OK(system("ip link add bond type bond"), "add bond")) + goto out; + + bond = if_nametoindex("bond"); + if (!ASSERT_GE(bond, 0, "if_nametoindex bond")) + goto out; + + if (!ASSERT_OK(system("ip link add bond_nest1 type bond"), "add bond_nest1")) + goto out; + + err = system("ip link set bond_nest1 master bond"); + if (!ASSERT_OK(err, "set bond_nest1 master")) + goto out; + + if (!ASSERT_OK(system("ip link add bond_nest2 type bond"), "add bond_nest1")) + goto out; + + err = system("ip link set bond_nest2 master bond_nest1"); + if (!ASSERT_OK(err, "set bond_nest2 master")) + goto out; + + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, bond); + ASSERT_OK_PTR(link, "attach program to master"); + +out: + bpf_link__destroy(link); + system("ip link del bond"); + system("ip link del bond_nest1"); + system("ip link del bond_nest2"); +} + +static int libbpf_debug_print(enum libbpf_print_level level, + const char *format, va_list args) +{ + if (level != LIBBPF_WARN) + vprintf(format, args); + return 0; +} + +struct bond_test_case { + char *name; + int mode; + int xmit_policy; +}; + +static struct bond_test_case 
bond_test_cases[] = { + { "xdp_bonding_roundrobin", BOND_MODE_ROUNDROBIN, BOND_XMIT_POLICY_LAYER23, }, + { "xdp_bonding_activebackup", BOND_MODE_ACTIVEBACKUP, BOND_XMIT_POLICY_LAYER23 }, + + { "xdp_bonding_xor_layer2", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER2, }, + { "xdp_bonding_xor_layer23", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER23, }, + { "xdp_bonding_xor_layer34", BOND_MODE_XOR, BOND_XMIT_POLICY_LAYER34, }, +}; + +void serial_test_xdp_bonding(void) +{ + libbpf_print_fn_t old_print_fn; + struct skeletons skeletons = {}; + int i; + + old_print_fn = libbpf_set_print(libbpf_debug_print); + + root_netns_fd = open("/proc/self/ns/net", O_RDONLY); + if (!ASSERT_GE(root_netns_fd, 0, "open /proc/self/ns/net")) + goto out; + + skeletons.xdp_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load")) + goto out; + + skeletons.xdp_tx = xdp_tx__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load")) + goto out; + + skeletons.xdp_redirect_multi_kern = xdp_redirect_multi_kern__open_and_load(); + if (!ASSERT_OK_PTR(skeletons.xdp_redirect_multi_kern, + "xdp_redirect_multi_kern__open_and_load")) + goto out; + + if (test__start_subtest("xdp_bonding_attach")) + test_xdp_bonding_attach(&skeletons); + + if (test__start_subtest("xdp_bonding_nested")) + test_xdp_bonding_nested(&skeletons); + + for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) { + struct bond_test_case *test_case = &bond_test_cases[i]; + + if (test__start_subtest(test_case->name)) + test_xdp_bonding_with_mode( + &skeletons, + test_case->mode, + test_case->xmit_policy); + } + + if (test__start_subtest("xdp_bonding_redirect_multi")) + test_xdp_bonding_redirect_multi(&skeletons); + +out: + xdp_dummy__destroy(skeletons.xdp_dummy); + xdp_tx__destroy(skeletons.xdp_tx); + xdp_redirect_multi_kern__destroy(skeletons.xdp_redirect_multi_kern); + + libbpf_set_print(old_print_fn); + if (root_netns_fd >= 0) + close(root_netns_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c new file mode 100644 index 000000000..76967d8ac --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include <net/if.h> +#include "test_xdp.skel.h" +#include "test_xdp_bpf2bpf.skel.h" + +struct meta { + int ifindex; + int pkt_len; +}; + +struct test_ctx_s { + bool passed; + int pkt_size; +}; + +struct test_ctx_s test_ctx; + +static void on_sample(void *ctx, int cpu, void *data, __u32 size) +{ + struct meta *meta = (struct meta *)data; + struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta); + unsigned char *raw_pkt = data + sizeof(*meta); + struct test_ctx_s *tst_ctx = ctx; + + ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size"); + ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex"); + ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len"); + ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0, + "check_packet_content"); + + if (meta->pkt_len > sizeof(pkt_v4)) { + for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++) + ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i, + "check_packet_content"); + } + + tst_ctx->passed = true; +} + +#define BUF_SZ 9000 + +static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb, + struct test_xdp_bpf2bpf *ftrace_skel, + int pkt_size) +{ + __u8 *buf, *buf_in; + int err; + LIBBPF_OPTS(bpf_test_run_opts, topts); 
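+	/* Build a pkt_v4 packet padded to pkt_size with a 0, 1, 2, ... byte
+	 * pattern, run it through the XDP program, and let on_sample() check
+	 * that the same bytes show up in the perf ring buffer.
+	 */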
+ + if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") || + !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size")) + return; + + buf_in = malloc(BUF_SZ); + if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()")) + return; + + buf = malloc(BUF_SZ); + if (!ASSERT_OK_PTR(buf, "buf malloc()")) { + free(buf_in); + return; + } + + test_ctx.passed = false; + test_ctx.pkt_size = pkt_size; + + memcpy(buf_in, &pkt_v4, sizeof(pkt_v4)); + if (pkt_size > sizeof(pkt_v4)) { + for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++) + buf_in[i + sizeof(pkt_v4)] = i; + } + + /* Run test program */ + topts.data_in = buf_in; + topts.data_size_in = pkt_size; + topts.data_out = buf; + topts.data_size_out = BUF_SZ; + + err = bpf_prog_test_run_opts(pkt_fd, &topts); + + ASSERT_OK(err, "ipv4"); + ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval"); + ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size"); + + /* Make sure bpf_xdp_output() was triggered and it sent the expected + * data to the perf ring buffer. + */ + err = perf_buffer__poll(pb, 100); + + ASSERT_GE(err, 0, "perf_buffer__poll"); + ASSERT_TRUE(test_ctx.passed, "test passed"); + /* Verify test results */ + ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"), + "fentry result"); + ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result"); + + free(buf); + free(buf_in); +} + +void test_xdp_bpf2bpf(void) +{ + int err, pkt_fd, map_fd; + int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200}; + struct iptnl_info value4 = {.family = AF_INET6}; + struct test_xdp *pkt_skel = NULL; + struct test_xdp_bpf2bpf *ftrace_skel = NULL; + struct vip key4 = {.protocol = 6, .family = AF_INET}; + struct bpf_program *prog; + struct perf_buffer *pb = NULL; + + /* Load XDP program to introspect */ + pkt_skel = test_xdp__open_and_load(); + if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load")) + return; + + pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel); + + map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl); + bpf_map_update_elem(map_fd, &key4, &value4, 0); + + /* Load trace program */ + ftrace_skel = test_xdp_bpf2bpf__open(); + if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open")) + goto out; + + /* Demonstrate the bpf_program__set_attach_target() API rather than + * the load with options, i.e. opts.attach_prog_fd. 
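+	 * Both the fentry and the fexit program are pointed at the
+	 * _xdp_tx_iptunnel program before the skeleton is loaded.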
+ */ + prog = ftrace_skel->progs.trace_on_entry; + bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY); + bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel"); + + prog = ftrace_skel->progs.trace_on_exit; + bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT); + bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel"); + + err = test_xdp_bpf2bpf__load(ftrace_skel); + if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load")) + goto out; + + err = test_xdp_bpf2bpf__attach(ftrace_skel); + if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach")) + goto out; + + /* Set up perf buffer */ + pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8, + on_sample, NULL, &test_ctx, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buf__new")) + goto out; + + for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++) + run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel, + pkt_sizes[i]); +out: + perf_buffer__free(pb); + test_xdp__destroy(pkt_skel); + test_xdp_bpf2bpf__destroy(ftrace_skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c new file mode 100644 index 000000000..ab4952b9f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "test_xdp_context_test_run.skel.h" + +void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, + __u32 data_meta, __u32 data, __u32 data_end, + __u32 ingress_ifindex, __u32 rx_queue_index, + __u32 egress_ifindex) +{ + struct xdp_md ctx = { + .data = data, + .data_end = data_end, + .data_meta = data_meta, + .ingress_ifindex = ingress_ifindex, + .rx_queue_index = rx_queue_index, + .egress_ifindex = egress_ifindex, + }; + int err; + + opts.ctx_in = &ctx; + opts.ctx_size_in = sizeof(ctx); + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_EQ(errno, EINVAL, "errno-EINVAL"); + ASSERT_ERR(err, "bpf_prog_test_run"); +} + +void test_xdp_context_test_run(void) +{ + struct test_xdp_context_test_run *skel = NULL; + char data[sizeof(pkt_v4) + sizeof(__u32)]; + char bad_ctx[sizeof(struct xdp_md) + 1]; + struct xdp_md ctx_in, ctx_out; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = sizeof(data), + .ctx_out = &ctx_out, + .ctx_size_out = sizeof(ctx_out), + .repeat = 1, + ); + int err, prog_fd; + + skel = test_xdp_context_test_run__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + prog_fd = bpf_program__fd(skel->progs.xdp_context); + + /* Data past the end of the kernel's struct xdp_md must be 0 */ + bad_ctx[sizeof(bad_ctx) - 1] = 1; + opts.ctx_in = bad_ctx; + opts.ctx_size_in = sizeof(bad_ctx); + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_EQ(errno, E2BIG, "extradata-errno"); + ASSERT_ERR(err, "bpf_prog_test_run(extradata)"); + + *(__u32 *)data = XDP_PASS; + *(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4; + opts.ctx_in = &ctx_in; + opts.ctx_size_in = sizeof(ctx_in); + memset(&ctx_in, 0, sizeof(ctx_in)); + ctx_in.data_meta = 0; + ctx_in.data = sizeof(__u32); + ctx_in.data_end = ctx_in.data + sizeof(pkt_v4); + err = bpf_prog_test_run_opts(prog_fd, &opts); + ASSERT_OK(err, "bpf_prog_test_run(valid)"); + ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval"); + ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize"); + ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize"); + ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta"); + 
ASSERT_EQ(ctx_out.data, 0, "valid-data"); + ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend"); + + /* Meta data's size must be a multiple of 4 */ + test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0); + + /* data_meta must reference the start of data */ + test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data), + 0, 0, 0); + + /* Meta data must be 32 bytes or smaller */ + test_xdp_context_error(prog_fd, opts, 0, 36, sizeof(data), 0, 0, 0); + + /* Total size of data must match data_end - data_meta */ + test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), + sizeof(data) - 1, 0, 0, 0); + test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), + sizeof(data) + 1, 0, 0, 0); + + /* RX queue cannot be specified without specifying an ingress */ + test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), + 0, 1, 0); + + /* Interface 1 is always the loopback interface which always has only + * one RX queue (index 0). This makes index 1 an invalid rx queue index + * for interface 1. + */ + test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), + 1, 1, 0); + + /* The egress cannot be specified */ + test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data), + 0, 0, 1); + + test_xdp_context_test_run__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c new file mode 100644 index 000000000..f775a1613 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <uapi/linux/bpf.h> +#include <linux/if_link.h> +#include <test_progs.h> + +#include "test_xdp_with_cpumap_frags_helpers.skel.h" +#include "test_xdp_with_cpumap_helpers.skel.h" + +#define IFINDEX_LO 1 + +static void test_xdp_with_cpumap_helpers(void) +{ + struct test_xdp_with_cpumap_helpers *skel; + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + struct bpf_cpumap_val val = { + .qsize = 192, + }; + int err, prog_fd, map_fd; + __u32 idx = 0; + + skel = test_xdp_with_cpumap_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load")) + return; + + prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog); + err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); + if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP")) + goto out_close; + + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); + ASSERT_OK(err, "XDP program detach"); + + prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); + map_fd = bpf_map__fd(skel->maps.cpu_map); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto out_close; + + val.bpf_prog.fd = prog_fd; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_OK(err, "Add program to cpumap entry"); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + ASSERT_OK(err, "Read cpumap entry"); + ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id"); + + /* can not attach BPF_XDP_CPUMAP program to a device */ + err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); + if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program")) + bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); + + val.qsize = 192; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry"); + + /* Try to 
attach BPF_XDP program with frags to cpumap when we have + * already loaded a BPF_XDP program on the map + */ + idx = 1; + val.qsize = 192; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry"); + +out_close: + test_xdp_with_cpumap_helpers__destroy(skel); +} + +static void test_xdp_with_cpumap_frags_helpers(void) +{ + struct test_xdp_with_cpumap_frags_helpers *skel; + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + struct bpf_cpumap_val val = { + .qsize = 192, + }; + int err, frags_prog_fd, map_fd; + __u32 idx = 0; + + skel = test_xdp_with_cpumap_frags_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load")) + return; + + frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags); + map_fd = bpf_map__fd(skel->maps.cpu_map); + err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto out_close; + + val.bpf_prog.fd = frags_prog_fd; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_OK(err, "Add program to cpumap entry"); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + ASSERT_OK(err, "Read cpumap entry"); + ASSERT_EQ(info.id, val.bpf_prog.id, + "Match program id to cpumap entry prog_id"); + + /* Try to attach BPF_XDP program to cpumap when we have + * already loaded a BPF_XDP program with frags on the map + */ + idx = 1; + val.qsize = 192; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry"); + +out_close: + test_xdp_with_cpumap_frags_helpers__destroy(skel); +} + +void serial_test_xdp_cpumap_attach(void) +{ + if (test__start_subtest("CPUMAP with programs in entries")) + test_xdp_with_cpumap_helpers(); + + if (test__start_subtest("CPUMAP with frags programs in entries")) + test_xdp_with_cpumap_frags_helpers(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c new file mode 100644 index 000000000..ead40016c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <uapi/linux/bpf.h> +#include <linux/if_link.h> +#include <test_progs.h> + +#include "test_xdp_devmap_helpers.skel.h" +#include "test_xdp_with_devmap_frags_helpers.skel.h" +#include "test_xdp_with_devmap_helpers.skel.h" + +#define IFINDEX_LO 1 + +static void test_xdp_with_devmap_helpers(void) +{ + struct test_xdp_with_devmap_helpers *skel; + struct bpf_prog_info info = {}; + struct bpf_devmap_val val = { + .ifindex = IFINDEX_LO, + }; + __u32 len = sizeof(info); + int err, dm_fd, map_fd; + __u32 idx = 0; + + + skel = test_xdp_with_devmap_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load")) + return; + + dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog); + err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL); + if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap")) + goto out_close; + + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); + ASSERT_OK(err, "XDP program detach"); + + dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm); + map_fd = bpf_map__fd(skel->maps.dm_ports); + err = bpf_obj_get_info_by_fd(dm_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto out_close; + + 
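+	/* Store the BPF_XDP_DEVMAP program in a devmap entry and check that
+	 * the program id read back from the map matches the loaded program.
+	 */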
val.bpf_prog.fd = dm_fd; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_OK(err, "Add program to devmap entry"); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + ASSERT_OK(err, "Read devmap entry"); + ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id"); + + /* can not attach BPF_XDP_DEVMAP program to a device */ + err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL); + if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program")) + bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); + + val.ifindex = 1; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry"); + + /* Try to attach BPF_XDP program with frags to devmap when we have + * already loaded a BPF_XDP program on the map + */ + idx = 1; + val.ifindex = 1; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry"); + +out_close: + test_xdp_with_devmap_helpers__destroy(skel); +} + +static void test_neg_xdp_devmap_helpers(void) +{ + struct test_xdp_devmap_helpers *skel; + + skel = test_xdp_devmap_helpers__open_and_load(); + if (!ASSERT_EQ(skel, NULL, + "Load of XDP program accessing egress ifindex without attach type")) { + test_xdp_devmap_helpers__destroy(skel); + } +} + +static void test_xdp_with_devmap_frags_helpers(void) +{ + struct test_xdp_with_devmap_frags_helpers *skel; + struct bpf_prog_info info = {}; + struct bpf_devmap_val val = { + .ifindex = IFINDEX_LO, + }; + __u32 len = sizeof(info); + int err, dm_fd_frags, map_fd; + __u32 idx = 0; + + skel = test_xdp_with_devmap_frags_helpers__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load")) + return; + + dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags); + map_fd = bpf_map__fd(skel->maps.dm_ports); + err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len); + if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd")) + goto out_close; + + val.bpf_prog.fd = dm_fd_frags; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_OK(err, "Add frags program to devmap entry"); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + ASSERT_OK(err, "Read devmap entry"); + ASSERT_EQ(info.id, val.bpf_prog.id, + "Match program id to devmap entry prog_id"); + + /* Try to attach BPF_XDP program to devmap when we have + * already loaded a BPF_XDP program with frags on the map + */ + idx = 1; + val.ifindex = 1; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry"); + +out_close: + test_xdp_with_devmap_frags_helpers__destroy(skel); +} + +void serial_test_xdp_devmap_attach(void) +{ + if (test__start_subtest("DEVMAP with programs in entries")) + test_xdp_with_devmap_helpers(); + + if (test__start_subtest("DEVMAP with frags programs in entries")) + test_xdp_with_devmap_frags_helpers(); + + if (test__start_subtest("Verifier check of DEVMAP programs")) + test_neg_xdp_devmap_helpers(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c new file mode 100644 index 000000000..15ad33669 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include 
<network_helpers.h> +#include <net/if.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ipv6.h> +#include <linux/in6.h> +#include <linux/udp.h> +#include <bpf/bpf_endian.h> +#include "test_xdp_do_redirect.skel.h" + +#define SYS(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto out; \ + }) + +struct udp_packet { + struct ethhdr eth; + struct ipv6hdr iph; + struct udphdr udp; + __u8 payload[64 - sizeof(struct udphdr) + - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)]; +} __packed; + +static struct udp_packet pkt_udp = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, + .eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb}, + .iph.version = 6, + .iph.nexthdr = IPPROTO_UDP, + .iph.payload_len = bpf_htons(sizeof(struct udp_packet) + - offsetof(struct udp_packet, udp)), + .iph.hop_limit = 2, + .iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)}, + .iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)}, + .udp.source = bpf_htons(1), + .udp.dest = bpf_htons(1), + .udp.len = bpf_htons(sizeof(struct udp_packet) + - offsetof(struct udp_packet, udp)), + .payload = {0x42}, /* receiver XDP program matches on this */ +}; + +static int attach_tc_prog(struct bpf_tc_hook *hook, int fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd); + int ret; + + ret = bpf_tc_hook_create(hook); + if (!ASSERT_OK(ret, "create tc hook")) + return ret; + + ret = bpf_tc_attach(hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + + return 0; +} + +/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) - + * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes + */ +#if defined(__s390x__) +#define MAX_PKT_SIZE 3176 +#else +#define MAX_PKT_SIZE 3368 +#endif +static void test_max_pkt_size(int fd) +{ + char data[MAX_PKT_SIZE + 1] = {}; + int err; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = MAX_PKT_SIZE, + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, + .repeat = 1, + ); + err = bpf_prog_test_run_opts(fd, &opts); + ASSERT_OK(err, "prog_run_max_size"); + + opts.data_size_in += 1; + err = bpf_prog_test_run_opts(fd, &opts); + ASSERT_EQ(err, -EINVAL, "prog_run_too_big"); +} + +#define NUM_PKTS 10000 +void serial_test_xdp_do_redirect(void) +{ + int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst; + char data[sizeof(pkt_udp) + sizeof(__u32)]; + struct test_xdp_do_redirect *skel = NULL; + struct nstoken *nstoken = NULL; + struct bpf_link *link; + + struct xdp_md ctx_in = { .data = sizeof(__u32), + .data_end = sizeof(data) }; + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &data, + .data_size_in = sizeof(data), + .ctx_in = &ctx_in, + .ctx_size_in = sizeof(ctx_in), + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, + .repeat = NUM_PKTS, + .batch_size = 64, + ); + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp)); + *((__u32 *)data) = 0x42; /* metadata test value */ + + skel = test_xdp_do_redirect__open(); + if (!ASSERT_OK_PTR(skel, "skel")) + return; + + /* The XDP program we run with bpf_prog_run() will cycle through all + * three xmit (PASS/TX/REDIRECT) return codes starting from above, and + * ending up with PASS, so we should end up with two packets on the dst + * iface and 
NUM_PKTS-2 in the TC hook. We match the packets on the UDP + * payload. + */ + SYS("ip netns add testns"); + nstoken = open_netns("testns"); + if (!ASSERT_OK_PTR(nstoken, "setns")) + goto out; + + SYS("ip link add veth_src type veth peer name veth_dst"); + SYS("ip link set dev veth_src address 00:11:22:33:44:55"); + SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb"); + SYS("ip link set dev veth_src up"); + SYS("ip link set dev veth_dst up"); + SYS("ip addr add dev veth_src fc00::1/64"); + SYS("ip addr add dev veth_dst fc00::2/64"); + SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb"); + + /* We enable forwarding in the test namespace because that will cause + * the packets that go through the kernel stack (with XDP_PASS) to be + * forwarded back out the same interface (because of the packet dst + * combined with the interface addresses). When this happens, the + * regular forwarding path will end up going through the same + * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a + * deadlock if it happens on the same CPU. There's a local_bh_disable() + * in the test_run code to prevent this, but an earlier version of the + * code didn't have this, so we keep the test behaviour to make sure the + * bug doesn't resurface. + */ + SYS("sysctl -qw net.ipv6.conf.all.forwarding=1"); + + ifindex_src = if_nametoindex("veth_src"); + ifindex_dst = if_nametoindex("veth_dst"); + if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") || + !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst")) + goto out; + + memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN); + skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */ + skel->rodata->ifindex_in = ifindex_src; + ctx_in.ingress_ifindex = ifindex_src; + tc_hook.ifindex = ifindex_src; + + if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load")) + goto out; + + link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto out; + skel->links.xdp_count_pkts = link; + + tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts); + if (attach_tc_prog(&tc_hook, tc_prog_fd)) + goto out; + + xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect); + err = bpf_prog_test_run_opts(xdp_prog_fd, &opts); + if (!ASSERT_OK(err, "prog_run")) + goto out_tc; + + /* wait for the packets to be flushed */ + kern_sync_rcu(); + + /* There will be one packet sent through XDP_REDIRECT and one through + * XDP_TX; these will show up on the XDP counting program, while the + * rest will be counted at the TC ingress hook (and the counting program + * resets the packet payload so they don't get counted twice even though + * they are re-xmited out the veth device + */ + ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp"); + ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero"); + ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc"); + + test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts)); + +out_tc: + bpf_tc_hook_destroy(&tc_hook); +out: + if (nstoken) + close_netns(nstoken); + system("ip netns del testns"); + test_xdp_do_redirect__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c new file mode 100644 index 000000000..cd3aa340e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/if_link.h> +#include <test_progs.h> + +#define IFINDEX_LO 1 + +void serial_test_xdp_info(void) +{ + __u32 
len = sizeof(struct bpf_prog_info), duration = 0, prog_id; + const char *file = "./xdp_dummy.bpf.o"; + struct bpf_prog_info info = {}; + struct bpf_object *obj; + int err, prog_fd; + + /* Get prog_id for XDP_ATTACHED_NONE mode */ + + err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id); + if (CHECK(err, "get_xdp_none", "errno=%d\n", errno)) + return; + if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id)) + return; + + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id); + if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno)) + return; + if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n", + prog_id)) + return; + + /* Setup prog */ + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); + if (CHECK(err, "get_prog_info", "errno=%d\n", errno)) + goto out_close; + + err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); + if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno)) + goto out_close; + + /* Get prog_id for single prog mode */ + + err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id); + if (CHECK(err, "get_xdp", "errno=%d\n", errno)) + goto out; + if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n")) + goto out; + + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id); + if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno)) + goto out; + if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n")) + goto out; + + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id); + if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno)) + goto out; + if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id)) + goto out; + +out: + bpf_xdp_detach(IFINDEX_LO, 0, NULL); +out_close: + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c new file mode 100644 index 000000000..3e9d5c552 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include <uapi/linux/if_link.h> +#include <test_progs.h> +#include "test_xdp_link.skel.h" + +#define IFINDEX_LO 1 + +void serial_test_xdp_link(void) +{ + struct test_xdp_link *skel1 = NULL, *skel2 = NULL; + __u32 id1, id2, id0 = 0, prog_fd1, prog_fd2; + LIBBPF_OPTS(bpf_xdp_attach_opts, opts); + struct bpf_link_info link_info; + struct bpf_prog_info prog_info; + struct bpf_link *link; + int err; + __u32 link_info_len = sizeof(link_info); + __u32 prog_info_len = sizeof(prog_info); + + skel1 = test_xdp_link__open_and_load(); + if (!ASSERT_OK_PTR(skel1, "skel_load")) + goto cleanup; + prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler); + + skel2 = test_xdp_link__open_and_load(); + if (!ASSERT_OK_PTR(skel2, "skel_load")) + goto cleanup; + prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler); + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info1")) + goto cleanup; + id1 = prog_info.id; + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info2")) + goto cleanup; + id2 = prog_info.id; + + /* set initial prog attachment */ + err = bpf_xdp_attach(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts); + if (!ASSERT_OK(err, "fd_attach")) + goto cleanup; + + /* validate prog ID */ + err = 
bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val")) + goto cleanup; + + /* BPF link is not allowed to replace prog attachment */ + link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + /* best-effort detach prog */ + opts.old_prog_fd = prog_fd1; + bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts); + goto cleanup; + } + + /* detach BPF program */ + opts.old_prog_fd = prog_fd1; + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; + + /* now BPF link should attach successfully */ + link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + skel1->links.xdp_handler = link; + + /* validate prog ID */ + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val")) + goto cleanup; + + /* BPF prog attach is not allowed to replace BPF link */ + opts.old_prog_fd = prog_fd1; + err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts); + if (!ASSERT_ERR(err, "prog_attach_fail")) + goto cleanup; + + /* Can't force-update when BPF link is active */ + err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, 0, NULL); + if (!ASSERT_ERR(err, "prog_update_fail")) + goto cleanup; + + /* Can't force-detach when BPF link is active */ + err = bpf_xdp_detach(IFINDEX_LO, 0, NULL); + if (!ASSERT_ERR(err, "prog_detach_fail")) + goto cleanup; + + /* BPF link is not allowed to replace another BPF link */ + link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + bpf_link__destroy(skel1->links.xdp_handler); + skel1->links.xdp_handler = NULL; + + /* new link attach should succeed */ + link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + skel2->links.xdp_handler = link; + + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); + if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val")) + goto cleanup; + + /* updating program under active BPF link works as expected */ + err = bpf_link__update_program(link, skel1->progs.xdp_handler); + if (!ASSERT_OK(err, "link_upd")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup; + + ASSERT_EQ(link_info.type, BPF_LINK_TYPE_XDP, "link_type"); + ASSERT_EQ(link_info.prog_id, id1, "link_prog_id"); + ASSERT_EQ(link_info.xdp.ifindex, IFINDEX_LO, "link_ifindex"); + + /* updating program under active BPF link with different type fails */ + err = bpf_link__update_program(link, skel1->progs.tc_handler); + if (!ASSERT_ERR(err, "link_upd_invalid")) + goto cleanup; + + err = bpf_link__detach(link); + if (!ASSERT_OK(err, "link_detach")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + + ASSERT_OK(err, "link_info"); + ASSERT_EQ(link_info.prog_id, id1, "link_prog_id"); + /* ifindex should be zeroed out */ + ASSERT_EQ(link_info.xdp.ifindex, 0, "link_ifindex"); + +cleanup: + test_xdp_link__destroy(skel1); + test_xdp_link__destroy(skel2); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c new file mode 100644 index 000000000..92ef0aa50 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "test_xdp_noinline.skel.h" + +void test_xdp_noinline(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct test_xdp_noinline *skel; + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = {.vip_num = VIP_NUM}; + __u32 stats_key = VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + int err, i; + __u64 bytes = 0, pkts = 0; + char buf[128]; + u32 *magic = (u32 *)buf; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = NUM_ITER, + ); + + skel = test_xdp_noinline__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0); + bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0); + bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0); + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v4), &topts); + ASSERT_OK(err, "ipv4 test_run"); + ASSERT_EQ(topts.retval, 1, "ipv4 test_run retval"); + ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 test_run magic"); + + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_out = buf; + topts.data_size_out = sizeof(buf); + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v6), &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_EQ(topts.retval, 1, "ipv6 test_run retval"); + ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 test_run magic"); + + bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + ASSERT_EQ(bytes, MAGIC_BYTES * NUM_ITER * 2, "stats bytes"); + ASSERT_EQ(pkts, NUM_ITER * 2, "stats pkts"); + test_xdp_noinline__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c new file mode 100644 index 000000000..ec5369f24 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> + +void test_xdp_perf(void) +{ + const char *file = "./xdp_dummy.bpf.o"; + struct bpf_object *obj; + char in[128], out[128]; + int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = in, + .data_size_in = sizeof(in), + .data_out = out, + .data_size_out = sizeof(out), + .repeat = 1000000, + ); + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (CHECK_FAIL(err)) + return; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, XDP_PASS, "test_run retval"); + ASSERT_EQ(topts.data_size_out, 128, "test_run data_size_out"); + + bpf_object__close(obj); +} diff --git 
a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c new file mode 100644 index 000000000..879f5da2f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#define _GNU_SOURCE +#include <test_progs.h> +#include <network_helpers.h> +#include <ctype.h> + +#define CMD_OUT_BUF_SIZE 1023 + +#define SYS(cmd) ({ \ + if (!ASSERT_OK(system(cmd), (cmd))) \ + goto out; \ +}) + +#define SYS_OUT(cmd, ...) ({ \ + char buf[1024]; \ + snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \ + FILE *f = popen(buf, "r"); \ + if (!ASSERT_OK_PTR(f, buf)) \ + goto out; \ + f; \ +}) + +/* out must be at least `size * 4 + 1` bytes long */ +static void escape_str(char *out, const char *in, size_t size) +{ + static const char *hex = "0123456789ABCDEF"; + size_t i; + + for (i = 0; i < size; i++) { + if (isprint(in[i]) && in[i] != '\\' && in[i] != '\'') { + *out++ = in[i]; + } else { + *out++ = '\\'; + *out++ = 'x'; + *out++ = hex[(in[i] >> 4) & 0xf]; + *out++ = hex[in[i] & 0xf]; + } + } + *out++ = '\0'; +} + +static bool expect_str(char *buf, size_t size, const char *str, const char *name) +{ + static char escbuf_expected[CMD_OUT_BUF_SIZE * 4]; + static char escbuf_actual[CMD_OUT_BUF_SIZE * 4]; + static int duration = 0; + bool ok; + + ok = size == strlen(str) && !memcmp(buf, str, size); + + if (!ok) { + escape_str(escbuf_expected, str, strlen(str)); + escape_str(escbuf_actual, buf, size); + } + CHECK(!ok, name, "unexpected %s: actual '%s' != expected '%s'\n", + name, escbuf_actual, escbuf_expected); + + return ok; +} + +static void test_synproxy(bool xdp) +{ + int server_fd = -1, client_fd = -1, accept_fd = -1; + char *prog_id = NULL, *prog_id_end; + struct nstoken *ns = NULL; + FILE *ctrl_file = NULL; + char buf[CMD_OUT_BUF_SIZE]; + size_t size; + + SYS("ip netns add synproxy"); + + SYS("ip link add tmp0 type veth peer name tmp1"); + SYS("ip link set tmp1 netns synproxy"); + SYS("ip link set tmp0 up"); + SYS("ip addr replace 198.18.0.1/24 dev tmp0"); + + /* When checksum offload is enabled, the XDP program sees wrong + * checksums and drops packets. + */ + SYS("ethtool -K tmp0 tx off"); + if (xdp) + /* Workaround required for veth. */ + SYS("ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null"); + + ns = open_netns("synproxy"); + if (!ASSERT_OK_PTR(ns, "setns")) + goto out; + + SYS("ip link set lo up"); + SYS("ip link set tmp1 up"); + SYS("ip addr replace 198.18.0.2/24 dev tmp1"); + SYS("sysctl -w net.ipv4.tcp_syncookies=2"); + SYS("sysctl -w net.ipv4.tcp_timestamps=1"); + SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0"); + SYS("iptables -t raw -I PREROUTING \ + -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack"); + SYS("iptables -t filter -A INPUT \ + -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \ + -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460"); + SYS("iptables -t filter -A INPUT \ + -i tmp1 -m state --state INVALID -j DROP"); + + ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \ + --single --mss4 1460 --mss6 1440 \ + --wscale 7 --ttl 64%s", xdp ? 
"" : " --tc"); + size = fread(buf, 1, sizeof(buf), ctrl_file); + pclose(ctrl_file); + if (!expect_str(buf, size, "Total SYNACKs generated: 0\n", + "initial SYNACKs")) + goto out; + + if (!xdp) { + ctrl_file = SYS_OUT("tc filter show dev tmp1 ingress"); + size = fread(buf, 1, sizeof(buf), ctrl_file); + pclose(ctrl_file); + prog_id = memmem(buf, size, " id ", 4); + if (!ASSERT_OK_PTR(prog_id, "find prog id")) + goto out; + prog_id += 4; + if (!ASSERT_LT(prog_id, buf + size, "find prog id begin")) + goto out; + prog_id_end = prog_id; + while (prog_id_end < buf + size && *prog_id_end >= '0' && + *prog_id_end <= '9') + prog_id_end++; + if (!ASSERT_LT(prog_id_end, buf + size, "find prog id end")) + goto out; + *prog_id_end = '\0'; + } + + server_fd = start_server(AF_INET, SOCK_STREAM, "198.18.0.2", 8080, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto out; + + close_netns(ns); + ns = NULL; + + client_fd = connect_to_fd(server_fd, 10000); + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto out; + + accept_fd = accept(server_fd, NULL, NULL); + if (!ASSERT_GE(accept_fd, 0, "accept")) + goto out; + + ns = open_netns("synproxy"); + if (!ASSERT_OK_PTR(ns, "setns")) + goto out; + + if (xdp) + ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --single"); + else + ctrl_file = SYS_OUT("./xdp_synproxy --prog %s --single", + prog_id); + size = fread(buf, 1, sizeof(buf), ctrl_file); + pclose(ctrl_file); + if (!expect_str(buf, size, "Total SYNACKs generated: 1\n", + "SYNACKs after connection")) + goto out; + +out: + if (accept_fd >= 0) + close(accept_fd); + if (client_fd >= 0) + close(client_fd); + if (server_fd >= 0) + close(server_fd); + if (ns) + close_netns(ns); + + system("ip link del tmp0"); + system("ip netns del synproxy"); +} + +void serial_test_xdp_synproxy(void) +{ + if (test__start_subtest("xdp")) + test_synproxy(true); + if (test__start_subtest("tc")) + test_synproxy(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c new file mode 100644 index 000000000..f3927829a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include "test_progs.h" +#include "xdpwall.skel.h" + +void test_xdpwall(void) +{ + struct xdpwall *skel; + + skel = xdpwall__open_and_load(); + ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?"); + + xdpwall__destroy(skel); +} |