Diffstat:

-rw-r--r--  examples/bpf/README                  |  18
-rw-r--r--  examples/bpf/bpf_graft.c             |  66
-rw-r--r--  examples/bpf/bpf_map_in_map.c        |  55
-rw-r--r--  examples/bpf/bpf_shared.c            |  53
-rw-r--r--  examples/bpf/legacy/bpf_cyclic.c     |  35
-rw-r--r--  examples/bpf/legacy/bpf_graft.c      |  66
-rw-r--r--  examples/bpf/legacy/bpf_map_in_map.c |  56
-rw-r--r--  examples/bpf/legacy/bpf_shared.c     |  53
-rw-r--r--  examples/bpf/legacy/bpf_tailcall.c   | 117
9 files changed, 519 insertions, 0 deletions
diff --git a/examples/bpf/README b/examples/bpf/README
new file mode 100644
index 0000000..b726119
--- /dev/null
+++ b/examples/bpf/README
@@ -0,0 +1,18 @@
+eBPF toy code examples (run in the kernel) to familiarize yourself
+with the syntax and features:
+
+- BTF-defined map examples
+ - bpf_graft.c -> Demo of altering runtime behaviour
+ - bpf_shared.c -> Ingress/egress map sharing example
+ - bpf_map_in_map.c -> Map-in-map usage example
+
+- map examples defined via legacy struct bpf_elf_map
+ - legacy/bpf_shared.c -> Ingress/egress map sharing example
+ - legacy/bpf_tailcall.c -> Using tail call chains
+ - legacy/bpf_cyclic.c -> Simple cycle as tail calls
+ - legacy/bpf_graft.c -> Demo of altering runtime behaviour
+ - legacy/bpf_map_in_map.c -> Map-in-map usage example
+
+Note: Users should use the new BTF way of defining maps; the examples
+in the legacy folder, which define maps via struct bpf_elf_map, are
+not recommended for new code.
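+
+For reference, the same minimal counter map in both styles (a sketch
+distilled from the examples; see bpf_shared.c and legacy/bpf_shared.c
+for complete, loadable code):
+
+  /* legacy style: struct bpf_elf_map */
+  struct bpf_elf_map __section_maps map_sh = {
+          .type       = BPF_MAP_TYPE_ARRAY,
+          .size_key   = sizeof(uint32_t),
+          .size_value = sizeof(uint32_t),
+          .max_elem   = 1,
+  };
+
+  /* new style: BTF-defined map */
+  struct {
+          __uint(type, BPF_MAP_TYPE_ARRAY);
+          __uint(key_size, sizeof(uint32_t));
+          __uint(value_size, sizeof(uint32_t));
+          __uint(max_entries, 1);
+  } map_sh __section(".maps");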
diff --git a/examples/bpf/bpf_graft.c b/examples/bpf/bpf_graft.c
new file mode 100644
index 0000000..8066dcc
--- /dev/null
+++ b/examples/bpf/bpf_graft.c
@@ -0,0 +1,66 @@
+#include "../../include/bpf_api.h"
+
+/* This example demonstrates how classifier run-time behaviour
+ * can be altered with tail calls. We start out with an empty
+ * jmp_tc array, then add section aaa to the array slot 0, and
+ * later on atomically replace it with section bbb. Note that,
+ * as shown in other examples, the tc loader can prepopulate
+ * tail-called sections; here we start out with an empty one
+ * on purpose to show that it can also be done this way.
+ *
+ * tc filter add dev foo parent ffff: bpf obj graft.o
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-20229 [001] ..s. 138993.003923: : fallthrough
+ * <idle>-0 [001] ..s. 138993.202265: : fallthrough
+ * Socket Thread-20229 [001] ..s. 138994.004149: : fallthrough
+ * [...]
+ *
+ * tc exec bpf graft m:globals/jmp_tc key 0 obj graft.o sec aaa
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-19818 [002] ..s. 139012.053587: : aaa
+ * <idle>-0 [002] ..s. 139012.172359: : aaa
+ * Socket Thread-19818 [001] ..s. 139012.173556: : aaa
+ * [...]
+ *
+ * tc exec bpf graft m:globals/jmp_tc key 0 obj graft.o sec bbb
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-19818 [002] ..s. 139022.102967: : bbb
+ * <idle>-0 [002] ..s. 139022.155640: : bbb
+ * Socket Thread-19818 [001] ..s. 139022.156730: : bbb
+ * [...]
+ */
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(key_size, sizeof(uint32_t));
+ __uint(value_size, sizeof(uint32_t));
+ __uint(max_entries, 1);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+} jmp_tc __section(".maps");
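+/* jmp_tc is deliberately left unpopulated at load time; its slot is
+ * filled later via tc exec bpf graft, as shown in the comment above.
+ */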
+
+__section("aaa")
+int cls_aaa(struct __sk_buff *skb)
+{
+ printt("aaa\n");
+ return TC_H_MAKE(1, 42);
+}
+
+__section("bbb")
+int cls_bbb(struct __sk_buff *skb)
+{
+ printt("bbb\n");
+ return TC_H_MAKE(1, 43);
+}
+
+__section_cls_entry
+int cls_entry(struct __sk_buff *skb)
+{
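+	/* tail_call() does not return on success; we only fall through
+	 * to the printt() below when the jmp_tc slot is empty or the
+	 * jump fails.
+	 */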
+ tail_call(skb, &jmp_tc, 0);
+ printt("fallthrough\n");
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/bpf_map_in_map.c b/examples/bpf/bpf_map_in_map.c
new file mode 100644
index 0000000..39c8626
--- /dev/null
+++ b/examples/bpf/bpf_map_in_map.c
@@ -0,0 +1,55 @@
+#include "../../include/bpf_api.h"
+
+struct inner_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(uint32_t));
+ __uint(value_size, sizeof(uint32_t));
+ __uint(max_entries, 1);
+} map_inner __section(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(key_size, sizeof(uint32_t));
+ __uint(value_size, sizeof(uint32_t));
+ __uint(max_entries, 1);
+ __uint(pinning, LIBBPF_PIN_BY_NAME);
+ __array(values, struct inner_map);
+} map_outer __section(".maps") = {
+ .values = {
+ [0] = &map_inner,
+ },
+};
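+
+/* Slot 0 of map_outer is prepopulated with map_inner at load time via
+ * the .values initializer above; the programs below then fetch the
+ * inner map through map_outer at run time.
+ */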
+
+__section("egress")
+int emain(struct __sk_buff *skb)
+{
+ struct bpf_elf_map *map_inner;
+ int key = 0, *val;
+
+ map_inner = map_lookup_elem(&map_outer, &key);
+ if (map_inner) {
+ val = map_lookup_elem(map_inner, &key);
+ if (val)
+ lock_xadd(val, 1);
+ }
+
+ return BPF_H_DEFAULT;
+}
+
+__section("ingress")
+int imain(struct __sk_buff *skb)
+{
+ struct bpf_elf_map *map_inner;
+ int key = 0, *val;
+
+ map_inner = map_lookup_elem(&map_outer, &key);
+ if (map_inner) {
+ val = map_lookup_elem(map_inner, &key);
+ if (val)
+ printt("map val: %d\n", *val);
+ }
+
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/bpf_shared.c b/examples/bpf/bpf_shared.c
new file mode 100644
index 0000000..99a332f
--- /dev/null
+++ b/examples/bpf/bpf_shared.c
@@ -0,0 +1,53 @@
+#include "../../include/bpf_api.h"
+
+/* Minimal, stand-alone toy map pinning example:
+ *
+ * clang -target bpf -O2 [...] -o bpf_shared.o -c bpf_shared.c
+ * tc filter add dev foo parent 1: bpf obj bpf_shared.o sec egress
+ * tc filter add dev foo parent ffff: bpf obj bpf_shared.o sec ingress
+ *
+ * Both classifiers will share the very same map instance in this example,
+ * so the map contents can be accessed from the ingress *and* egress side!
+ *
+ * This example uses LIBBPF_PIN_BY_NAME, so the map is pinned by its
+ * name into the bpf filesystem and thus shared among the program
+ * sections within this object, as well as with other object files
+ * that declare a map of the same name.
+ *
+ * A setting of LIBBPF_PIN_NONE (= 0) means no pinning, so each tc
+ * invocation creates a new map instance.
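+ *
+ * To inspect the pinned map from user space (a sketch; the exact pin
+ * path depends on tc's setup, commonly /sys/fs/bpf/tc/globals):
+ *
+ *   bpftool map dump pinned /sys/fs/bpf/tc/globals/map_sh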
+ */
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, sizeof(uint32_t));
+ __uint(value_size, sizeof(uint32_t));
+ __uint(max_entries, 1);
+ __uint(pinning, LIBBPF_PIN_BY_NAME); /* or LIBBPF_PIN_NONE */
+} map_sh __section(".maps");
+
+__section("egress")
+int emain(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ lock_xadd(val, 1);
+
+ return BPF_H_DEFAULT;
+}
+
+__section("ingress")
+int imain(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ printt("map val: %d\n", *val);
+
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/legacy/bpf_cyclic.c b/examples/bpf/legacy/bpf_cyclic.c
new file mode 100644
index 0000000..3359073
--- /dev/null
+++ b/examples/bpf/legacy/bpf_cyclic.c
@@ -0,0 +1,35 @@
+#include "../../../include/bpf_api.h"
+
+/* Cyclic dependency example to test the kernel's runtime upper
+ * bound on tail call loops. Also demonstrates how to use the
+ * direct-action mode, loaded as: tc filter add [...] bpf da obj [...]
+ */
+#define JMP_MAP_ID 0xabccba
+
+struct bpf_elf_map __section_maps jmp_tc = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .id = JMP_MAP_ID,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_OBJECT_NS,
+ .max_elem = 1,
+};
+
+__section_tail(JMP_MAP_ID, 0)
+int cls_loop(struct __sk_buff *skb)
+{
+ printt("cb: %u\n", skb->cb[0]++);
+ tail_call(skb, &jmp_tc, 0);
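+	/* The kernel bounds tail call chains via MAX_TAIL_CALL_CNT, so
+	 * this self-referential jump is guaranteed to terminate and
+	 * eventually fall through to the code below.
+	 */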
+
+ skb->tc_classid = TC_H_MAKE(1, 42);
+ return TC_ACT_OK;
+}
+
+__section_cls_entry
+int cls_entry(struct __sk_buff *skb)
+{
+ tail_call(skb, &jmp_tc, 0);
+ return TC_ACT_SHOT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/legacy/bpf_graft.c b/examples/bpf/legacy/bpf_graft.c
new file mode 100644
index 0000000..f4c920c
--- /dev/null
+++ b/examples/bpf/legacy/bpf_graft.c
@@ -0,0 +1,66 @@
+#include "../../../include/bpf_api.h"
+
+/* This example demonstrates how classifier run-time behaviour
+ * can be altered with tail calls. We start out with an empty
+ * jmp_tc array, then add section aaa to the array slot 0, and
+ * later on atomically replace it with section bbb. Note that,
+ * as shown in other examples, the tc loader can prepopulate
+ * tail-called sections; here we start out with an empty one
+ * on purpose to show that it can also be done this way.
+ *
+ * tc filter add dev foo parent ffff: bpf obj graft.o
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-20229 [001] ..s. 138993.003923: : fallthrough
+ * <idle>-0 [001] ..s. 138993.202265: : fallthrough
+ * Socket Thread-20229 [001] ..s. 138994.004149: : fallthrough
+ * [...]
+ *
+ * tc exec bpf graft m:globals/jmp_tc key 0 obj graft.o sec aaa
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-19818 [002] ..s. 139012.053587: : aaa
+ * <idle>-0 [002] ..s. 139012.172359: : aaa
+ * Socket Thread-19818 [001] ..s. 139012.173556: : aaa
+ * [...]
+ *
+ * tc exec bpf graft m:globals/jmp_tc key 0 obj graft.o sec bbb
+ * tc exec bpf dbg
+ * [...]
+ * Socket Thread-19818 [002] ..s. 139022.102967: : bbb
+ * <idle>-0 [002] ..s. 139022.155640: : bbb
+ * Socket Thread-19818 [001] ..s. 139022.156730: : bbb
+ * [...]
+ */
+
+struct bpf_elf_map __section_maps jmp_tc = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
+
+__section("aaa")
+int cls_aaa(struct __sk_buff *skb)
+{
+ printt("aaa\n");
+ return TC_H_MAKE(1, 42);
+}
+
+__section("bbb")
+int cls_bbb(struct __sk_buff *skb)
+{
+ printt("bbb\n");
+ return TC_H_MAKE(1, 43);
+}
+
+__section_cls_entry
+int cls_entry(struct __sk_buff *skb)
+{
+ tail_call(skb, &jmp_tc, 0);
+ printt("fallthrough\n");
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/legacy/bpf_map_in_map.c b/examples/bpf/legacy/bpf_map_in_map.c
new file mode 100644
index 0000000..575f881
--- /dev/null
+++ b/examples/bpf/legacy/bpf_map_in_map.c
@@ -0,0 +1,56 @@
+#include "../../../include/bpf_api.h"
+
+#define MAP_INNER_ID 42
+
+struct bpf_elf_map __section_maps map_inner = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .id = MAP_INNER_ID,
+ .inner_idx = 0,
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
+
+struct bpf_elf_map __section_maps map_outer = {
+ .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .inner_id = MAP_INNER_ID,
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
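+
+/* Loader-side linkage: map_outer's .inner_id refers to map_inner's .id
+ * so the outer map's inner-map template can be derived, and map_inner's
+ * .inner_idx selects the slot (here 0) of map_outer that the loader
+ * prepopulates with map_inner.
+ */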
+
+__section("egress")
+int emain(struct __sk_buff *skb)
+{
+ struct bpf_elf_map *map_inner;
+ int key = 0, *val;
+
+ map_inner = map_lookup_elem(&map_outer, &key);
+ if (map_inner) {
+ val = map_lookup_elem(map_inner, &key);
+ if (val)
+ lock_xadd(val, 1);
+ }
+
+ return BPF_H_DEFAULT;
+}
+
+__section("ingress")
+int imain(struct __sk_buff *skb)
+{
+ struct bpf_elf_map *map_inner;
+ int key = 0, *val;
+
+ map_inner = map_lookup_elem(&map_outer, &key);
+ if (map_inner) {
+ val = map_lookup_elem(map_inner, &key);
+ if (val)
+ printt("map val: %d\n", *val);
+ }
+
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/legacy/bpf_shared.c b/examples/bpf/legacy/bpf_shared.c
new file mode 100644
index 0000000..05b2b9e
--- /dev/null
+++ b/examples/bpf/legacy/bpf_shared.c
@@ -0,0 +1,53 @@
+#include "../../../include/bpf_api.h"
+
+/* Minimal, stand-alone toy map pinning example:
+ *
+ * clang -target bpf -O2 [...] -o bpf_shared.o -c bpf_shared.c
+ * tc filter add dev foo parent 1: bpf obj bpf_shared.o sec egress
+ * tc filter add dev foo parent ffff: bpf obj bpf_shared.o sec ingress
+ *
+ * Both classifiers will share the very same map instance in this example,
+ * so the map contents can be accessed from the ingress *and* egress side!
+ *
+ * This example has a pinning of PIN_OBJECT_NS, so the map is private
+ * to this object and thus shared only among the program sections
+ * within it.
+ *
+ * A setting of PIN_GLOBAL_NS would place it into a global namespace,
+ * so that it can be shared among different object files. A setting
+ * of PIN_NONE (= 0) means no sharing, so each tc invocation creates
+ * a new map instance.
+ */
+
+struct bpf_elf_map __section_maps map_sh = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_OBJECT_NS, /* or PIN_GLOBAL_NS, or PIN_NONE */
+ .max_elem = 1,
+};
+
+__section("egress")
+int emain(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ lock_xadd(val, 1);
+
+ return BPF_H_DEFAULT;
+}
+
+__section("ingress")
+int imain(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ printt("map val: %d\n", *val);
+
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");
diff --git a/examples/bpf/legacy/bpf_tailcall.c b/examples/bpf/legacy/bpf_tailcall.c
new file mode 100644
index 0000000..8ebc554
--- /dev/null
+++ b/examples/bpf/legacy/bpf_tailcall.c
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../../../include/bpf_api.h"
+
+#define ENTRY_INIT 3
+#define ENTRY_0 0
+#define ENTRY_1 1
+#define MAX_JMP_SIZE 2
+
+#define FOO 42
+#define BAR 43
+
+/* This example doesn't really do anything useful, but its purpose is to
+ * demonstrate eBPF tail calls in a very simple setting.
+ *
+ * cls_entry() is our classifier entry point; from there we jump based on
+ * skb->hash into cls_case1() or cls_case2(). Both are part of the
+ * program array jmp_tc. As indicated via __section_tail(), the tc loader
+ * already populates the program arrays with the loaded file descriptors.
+ *
+ * To demonstrate nested jumps, cls_case2() jumps within the same jmp_tc
+ * array to cls_case1(). And whenever we arrive at cls_case1(), we jump
+ * into cls_exit(), part of the jump array jmp_ex.
+ *
+ * Also, to show that it's possible, all programs share map_sh and dump
+ * the value that the entry point incremented. The sections loaded into
+ * a program array can be atomically replaced at run time, e.g. to
+ * change classifier behaviour.
+ */
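+
+/* Call graph of the sections below:
+ *
+ *   cls_entry --(jmp_tc[skb->hash & 1])--> cls_case1 --(jmp_ex[0])--> cls_exit
+ *                                     \--> cls_case2 --(jmp_tc[0])--> cls_case1
+ */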
+
+struct bpf_elf_map __section_maps jmp_tc = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .id = FOO,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_OBJECT_NS,
+ .max_elem = MAX_JMP_SIZE,
+};
+
+struct bpf_elf_map __section_maps jmp_ex = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .id = BAR,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_OBJECT_NS,
+ .max_elem = 1,
+};
+
+struct bpf_elf_map __section_maps map_sh = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_OBJECT_NS,
+ .max_elem = 1,
+};
+
+__section_tail(FOO, ENTRY_0)
+int cls_case1(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ printt("case1: map-val: %d from:%u\n", *val, skb->cb[0]);
+
+ skb->cb[0] = ENTRY_0;
+ tail_call(skb, &jmp_ex, ENTRY_0);
+
+ return BPF_H_DEFAULT;
+}
+
+__section_tail(FOO, ENTRY_1)
+int cls_case2(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ printt("case2: map-val: %d from:%u\n", *val, skb->cb[0]);
+
+ skb->cb[0] = ENTRY_1;
+ tail_call(skb, &jmp_tc, ENTRY_0);
+
+ return BPF_H_DEFAULT;
+}
+
+__section_tail(BAR, ENTRY_0)
+int cls_exit(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ val = map_lookup_elem(&map_sh, &key);
+ if (val)
+ printt("exit: map-val: %d from:%u\n", *val, skb->cb[0]);
+
+ /* Termination point. */
+ return BPF_H_DEFAULT;
+}
+
+__section_cls_entry
+int cls_entry(struct __sk_buff *skb)
+{
+ int key = 0, *val;
+
+ /* For transferring state, we can use skb->cb[0] ... skb->cb[4]. */
+ val = map_lookup_elem(&map_sh, &key);
+ if (val) {
+ lock_xadd(val, 1);
+
+ skb->cb[0] = ENTRY_INIT;
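+		/* Mask the hash so the index stays within the two
+		 * jmp_tc slots (ENTRY_0/ENTRY_1).
+		 */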
+ tail_call(skb, &jmp_tc, skb->hash & (MAX_JMP_SIZE - 1));
+ }
+
+ printt("fallthrough\n");
+ return BPF_H_DEFAULT;
+}
+
+BPF_LICENSE("GPL");