Diffstat (limited to 'samples')
-rw-r--r--  samples/Kconfig | 274
-rw-r--r--  samples/Makefile | 38
-rw-r--r--  samples/acrn/Makefile | 12
-rw-r--r--  samples/acrn/guest.ld | 9
-rw-r--r--  samples/acrn/payload.ld | 9
-rw-r--r--  samples/acrn/vm-sample.c | 136
-rw-r--r--  samples/auxdisplay/.gitignore | 2
-rw-r--r--  samples/auxdisplay/Makefile | 2
-rw-r--r--  samples/auxdisplay/cfag12864b-example.c | 267
-rw-r--r--  samples/binderfs/.gitignore | 2
-rw-r--r--  samples/binderfs/Makefile | 4
-rw-r--r--  samples/binderfs/binderfs_example.c | 82
-rw-r--r--  samples/bpf/.gitignore | 63
-rw-r--r--  samples/bpf/Makefile | 450
-rw-r--r--  samples/bpf/Makefile.target | 75
-rw-r--r--  samples/bpf/README.rst | 111
-rw-r--r--  samples/bpf/asm_goto_workaround.h | 28
-rw-r--r--  samples/bpf/bpf_insn.h | 233
-rw-r--r--  samples/bpf/cookie_uid_helper_example.c | 332
-rw-r--r--  samples/bpf/cpustat_kern.c | 281
-rw-r--r--  samples/bpf/cpustat_user.c | 251
-rwxr-xr-x  samples/bpf/do_hbm_test.sh | 438
-rw-r--r--  samples/bpf/fds_example.c | 195
-rw-r--r--  samples/bpf/hash_func01.h | 55
-rw-r--r--  samples/bpf/hbm.c | 516
-rw-r--r--  samples/bpf/hbm.h | 38
-rw-r--r--  samples/bpf/hbm_edt_kern.c | 168
-rw-r--r--  samples/bpf/hbm_kern.h | 215
-rw-r--r--  samples/bpf/hbm_out_kern.c | 179
-rw-r--r--  samples/bpf/ibumad_kern.c | 138
-rw-r--r--  samples/bpf/ibumad_user.c | 158
-rw-r--r--  samples/bpf/lathist_kern.c | 99
-rw-r--r--  samples/bpf/lathist_user.c | 130
-rwxr-xr-x  samples/bpf/lwt_len_hist.sh | 40
-rw-r--r--  samples/bpf/lwt_len_hist_kern.c | 75
-rw-r--r--  samples/bpf/lwt_len_hist_user.c | 77
-rw-r--r--  samples/bpf/map_perf_test_kern.c | 303
-rw-r--r--  samples/bpf/map_perf_test_user.c | 503
-rw-r--r--  samples/bpf/offwaketime_kern.c | 158
-rw-r--r--  samples/bpf/offwaketime_user.c | 153
-rw-r--r--  samples/bpf/parse_ldabs.c | 43
-rw-r--r--  samples/bpf/parse_simple.c | 49
-rw-r--r--  samples/bpf/parse_varlen.c | 150
-rwxr-xr-x  samples/bpf/run_cookie_uid_helper_example.sh | 15
-rw-r--r--  samples/bpf/sampleip_kern.c | 38
-rw-r--r--  samples/bpf/sampleip_user.c | 227
-rw-r--r--  samples/bpf/sock_example.c | 111
-rw-r--r--  samples/bpf/sock_example.h | 35
-rw-r--r--  samples/bpf/sock_flags_kern.c | 49
-rw-r--r--  samples/bpf/sockex1_kern.c | 30
-rw-r--r--  samples/bpf/sockex1_user.c | 63
-rw-r--r--  samples/bpf/sockex2_kern.c | 223
-rw-r--r--  samples/bpf/sockex2_user.c | 62
-rw-r--r--  samples/bpf/sockex3_kern.c | 293
-rw-r--r--  samples/bpf/sockex3_user.c | 103
-rw-r--r--  samples/bpf/spintest_kern.c | 69
-rw-r--r--  samples/bpf/spintest_user.c | 92
-rw-r--r--  samples/bpf/syscall_nrs.c | 19
-rw-r--r--  samples/bpf/syscall_tp_kern.c | 73
-rw-r--r--  samples/bpf/syscall_tp_user.c | 138
-rw-r--r--  samples/bpf/task_fd_query_kern.c | 19
-rw-r--r--  samples/bpf/task_fd_query_user.c | 423
-rwxr-xr-x  samples/bpf/tc_l2_redirect.sh | 174
-rw-r--r--  samples/bpf/tc_l2_redirect_kern.c | 237
-rw-r--r--  samples/bpf/tc_l2_redirect_user.c | 70
-rw-r--r--  samples/bpf/tcbpf1_kern.c | 91
-rw-r--r--  samples/bpf/tcp_basertt_kern.c | 71
-rw-r--r--  samples/bpf/tcp_bpf.readme | 28
-rw-r--r--  samples/bpf/tcp_bufs_kern.c | 81
-rw-r--r--  samples/bpf/tcp_clamp_kern.c | 97
-rw-r--r--  samples/bpf/tcp_cong_kern.c | 78
-rw-r--r--  samples/bpf/tcp_dumpstats_kern.c | 68
-rw-r--r--  samples/bpf/tcp_iw_kern.c | 83
-rw-r--r--  samples/bpf/tcp_rwnd_kern.c | 64
-rw-r--r--  samples/bpf/tcp_synrto_kern.c | 64
-rw-r--r--  samples/bpf/tcp_tos_reflect_kern.c | 80
-rw-r--r--  samples/bpf/test_cgrp2_array_pin.c | 106
-rw-r--r--  samples/bpf/test_cgrp2_attach.c | 177
-rw-r--r--  samples/bpf/test_cgrp2_sock.c | 294
-rwxr-xr-x  samples/bpf/test_cgrp2_sock.sh | 135
-rw-r--r--  samples/bpf/test_cgrp2_sock2.c | 95
-rwxr-xr-x  samples/bpf/test_cgrp2_sock2.sh | 98
-rwxr-xr-x  samples/bpf/test_cgrp2_tc.sh | 185
-rw-r--r--  samples/bpf/test_cgrp2_tc_kern.c | 70
-rwxr-xr-x  samples/bpf/test_cls_bpf.sh | 38
-rw-r--r--  samples/bpf/test_current_task_under_cgroup_kern.c | 44
-rw-r--r--  samples/bpf/test_current_task_under_cgroup_user.c | 113
-rw-r--r--  samples/bpf/test_lru_dist.c | 539
-rw-r--r--  samples/bpf/test_lwt_bpf.c | 253
-rwxr-xr-x  samples/bpf/test_lwt_bpf.sh | 400
-rw-r--r--  samples/bpf/test_map_in_map_kern.c | 176
-rw-r--r--  samples/bpf/test_map_in_map_user.c | 168
-rw-r--r--  samples/bpf/test_overhead_kprobe_kern.c | 49
-rw-r--r--  samples/bpf/test_overhead_raw_tp_kern.c | 17
-rw-r--r--  samples/bpf/test_overhead_tp_kern.c | 37
-rw-r--r--  samples/bpf/test_overhead_user.c | 215
-rwxr-xr-x  samples/bpf/test_override_return.sh | 16
-rw-r--r--  samples/bpf/test_probe_write_user_kern.c | 56
-rw-r--r--  samples/bpf/test_probe_write_user_user.c | 108
-rw-r--r--  samples/bpf/trace_common.h | 13
-rw-r--r--  samples/bpf/trace_event_kern.c | 79
-rw-r--r--  samples/bpf/trace_event_user.c | 352
-rw-r--r--  samples/bpf/trace_output_kern.c | 31
-rw-r--r--  samples/bpf/trace_output_user.c | 105
-rw-r--r--  samples/bpf/tracex1_kern.c | 54
-rw-r--r--  samples/bpf/tracex1_user.c | 50
-rw-r--r--  samples/bpf/tracex2_kern.c | 102
-rw-r--r--  samples/bpf/tracex2_user.c | 186
-rw-r--r--  samples/bpf/tracex3_kern.c | 102
-rw-r--r--  samples/bpf/tracex3_user.c | 183
-rw-r--r--  samples/bpf/tracex4_kern.c | 55
-rw-r--r--  samples/bpf/tracex4_user.c | 96
-rw-r--r--  samples/bpf/tracex5_kern.c | 93
-rw-r--r--  samples/bpf/tracex5_user.c | 98
-rw-r--r--  samples/bpf/tracex6_kern.c | 82
-rw-r--r--  samples/bpf/tracex6_user.c | 222
-rw-r--r--  samples/bpf/tracex7_kern.c | 16
-rw-r--r--  samples/bpf/tracex7_user.c | 56
-rw-r--r--  samples/bpf/xdp1_kern.c | 100
-rw-r--r--  samples/bpf/xdp1_user.c | 166
-rw-r--r--  samples/bpf/xdp2_kern.c | 125
-rwxr-xr-x  samples/bpf/xdp2skb_meta.sh | 220
-rw-r--r--  samples/bpf/xdp2skb_meta_kern.c | 105
-rw-r--r--  samples/bpf/xdp_adjust_tail_kern.c | 155
-rw-r--r--  samples/bpf/xdp_adjust_tail_user.c | 198
-rw-r--r--  samples/bpf/xdp_fwd_kern.c | 158
-rw-r--r--  samples/bpf/xdp_fwd_user.c | 226
-rw-r--r--  samples/bpf/xdp_monitor.bpf.c | 8
-rw-r--r--  samples/bpf/xdp_monitor_user.c | 118
-rw-r--r--  samples/bpf/xdp_redirect.bpf.c | 49
-rw-r--r--  samples/bpf/xdp_redirect_cpu.bpf.c | 539
-rw-r--r--  samples/bpf/xdp_redirect_cpu_user.c | 559
-rw-r--r--  samples/bpf/xdp_redirect_map.bpf.c | 97
-rw-r--r--  samples/bpf/xdp_redirect_map_multi.bpf.c | 77
-rw-r--r--  samples/bpf/xdp_redirect_map_multi_user.c | 232
-rw-r--r--  samples/bpf/xdp_redirect_map_user.c | 228
-rw-r--r--  samples/bpf/xdp_redirect_user.c | 172
-rw-r--r--  samples/bpf/xdp_router_ipv4.bpf.c | 189
-rw-r--r--  samples/bpf/xdp_router_ipv4_user.c | 699
-rw-r--r--  samples/bpf/xdp_rxq_info_kern.c | 140
-rw-r--r--  samples/bpf/xdp_rxq_info_user.c | 614
-rw-r--r--  samples/bpf/xdp_sample.bpf.c | 266
-rw-r--r--  samples/bpf/xdp_sample.bpf.h | 141
-rw-r--r--  samples/bpf/xdp_sample_pkts_kern.c | 57
-rw-r--r--  samples/bpf/xdp_sample_pkts_user.c | 196
-rw-r--r--  samples/bpf/xdp_sample_shared.h | 17
-rw-r--r--  samples/bpf/xdp_sample_user.c | 1673
-rw-r--r--  samples/bpf/xdp_sample_user.h | 110
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_common.h | 34
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_kern.c | 237
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_user.c | 310
-rw-r--r--  samples/configfs/Makefile | 3
-rw-r--r--  samples/configfs/configfs_sample.c | 367
-rw-r--r--  samples/connector/.gitignore | 2
-rw-r--r--  samples/connector/Makefile | 6
-rw-r--r--  samples/connector/cn_test.c | 188
-rw-r--r--  samples/connector/ucon.c | 236
-rw-r--r--  samples/coresight/Makefile | 4
-rw-r--r--  samples/coresight/coresight-cfg-sample.c | 73
-rw-r--r--  samples/fanotify/.gitignore | 1
-rw-r--r--  samples/fanotify/Makefile | 5
-rw-r--r--  samples/fanotify/fs-monitor.c | 142
-rw-r--r--  samples/fprobe/Makefile | 3
-rw-r--r--  samples/fprobe/fprobe_example.c | 150
-rw-r--r--  samples/ftrace/Makefile | 10
-rw-r--r--  samples/ftrace/ftrace-direct-modify.c | 146
-rw-r--r--  samples/ftrace/ftrace-direct-multi-modify.c | 160
-rw-r--r--  samples/ftrace/ftrace-direct-multi.c | 88
-rw-r--r--  samples/ftrace/ftrace-direct-too.c | 90
-rw-r--r--  samples/ftrace/ftrace-direct.c | 81
-rw-r--r--  samples/ftrace/sample-trace-array.c | 143
-rw-r--r--  samples/ftrace/sample-trace-array.h | 84
-rw-r--r--  samples/hidraw/.gitignore | 2
-rw-r--r--  samples/hidraw/Makefile | 4
-rw-r--r--  samples/hidraw/hid-example.c | 182
-rw-r--r--  samples/hw_breakpoint/Makefile | 2
-rw-r--r--  samples/hw_breakpoint/data_breakpoint.c | 84
-rw-r--r--  samples/kdb/Makefile | 2
-rw-r--r--  samples/kdb/kdb_hello.c | 58
-rw-r--r--  samples/kfifo/Makefile | 2
-rw-r--r--  samples/kfifo/bytestream-example.c | 195
-rw-r--r--  samples/kfifo/dma-example.c | 141
-rw-r--r--  samples/kfifo/inttype-example.c | 186
-rw-r--r--  samples/kfifo/record-example.c | 202
-rw-r--r--  samples/kmemleak/Makefile | 3
-rw-r--r--  samples/kmemleak/kmemleak-test.c | 99
-rw-r--r--  samples/kobject/Makefile | 2
-rw-r--r--  samples/kobject/kobject-example.c | 144
-rw-r--r--  samples/kobject/kset-example.c | 288
-rw-r--r--  samples/kprobes/Makefile | 6
-rw-r--r--  samples/kprobes/kprobe_example.c | 120
-rw-r--r--  samples/kprobes/kretprobe_example.c | 107
-rw-r--r--  samples/landlock/.gitignore | 1
-rw-r--r--  samples/landlock/Makefile | 13
-rw-r--r--  samples/landlock/sandboxer.c | 290
-rw-r--r--  samples/livepatch/Makefile | 8
-rw-r--r--  samples/livepatch/livepatch-callbacks-busymod.c | 60
-rw-r--r--  samples/livepatch/livepatch-callbacks-demo.c | 196
-rw-r--r--  samples/livepatch/livepatch-callbacks-mod.c | 41
-rw-r--r--  samples/livepatch/livepatch-sample.c | 70
-rw-r--r--  samples/livepatch/livepatch-shadow-fix1.c | 173
-rw-r--r--  samples/livepatch/livepatch-shadow-fix2.c | 132
-rw-r--r--  samples/livepatch/livepatch-shadow-mod.c | 217
-rw-r--r--  samples/mei/.gitignore | 2
-rw-r--r--  samples/mei/Makefile | 5
-rw-r--r--  samples/mei/mei-amt-version.c | 488
-rw-r--r--  samples/nitro_enclaves/.gitignore | 2
-rw-r--r--  samples/nitro_enclaves/Makefile | 16
-rw-r--r--  samples/nitro_enclaves/ne_ioctl_sample.c | 882
-rw-r--r--  samples/pidfd/.gitignore | 2
-rw-r--r--  samples/pidfd/Makefile | 4
-rw-r--r--  samples/pidfd/pidfd-metadata.c | 120
-rw-r--r--  samples/pktgen/README.rst | 64
-rw-r--r--  samples/pktgen/functions.sh | 339
-rw-r--r--  samples/pktgen/parameters.sh | 139
-rwxr-xr-x  samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh | 109
-rwxr-xr-x  samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh | 89
-rwxr-xr-x  samples/pktgen/pktgen_sample01_simple.sh | 100
-rwxr-xr-x  samples/pktgen/pktgen_sample02_multiqueue.sh | 107
-rwxr-xr-x  samples/pktgen/pktgen_sample03_burst_single_flow.sh | 106
-rwxr-xr-x  samples/pktgen/pktgen_sample04_many_flows.sh | 118
-rwxr-xr-x  samples/pktgen/pktgen_sample05_flow_per_thread.sh | 102
-rwxr-xr-x  samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh | 125
-rw-r--r--  samples/qmi/Makefile | 2
-rw-r--r--  samples/qmi/qmi_sample_client.c | 622
-rw-r--r--  samples/rpmsg/Makefile | 2
-rw-r--r--  samples/rpmsg/rpmsg_client_sample.c | 96
-rw-r--r--  samples/rust/Kconfig | 30
-rw-r--r--  samples/rust/Makefile | 5
-rw-r--r--  samples/rust/hostprogs/.gitignore | 3
-rw-r--r--  samples/rust/hostprogs/Makefile | 5
-rw-r--r--  samples/rust/hostprogs/a.rs | 7
-rw-r--r--  samples/rust/hostprogs/b.rs | 5
-rw-r--r--  samples/rust/hostprogs/single.rs | 12
-rw-r--r--  samples/rust/rust_minimal.rs | 38
-rw-r--r--  samples/seccomp/.gitignore | 5
-rw-r--r--  samples/seccomp/Makefile | 6
-rw-r--r--  samples/seccomp/bpf-direct.c | 191
-rw-r--r--  samples/seccomp/bpf-fancy.c | 105
-rw-r--r--  samples/seccomp/bpf-helper.c | 96
-rw-r--r--  samples/seccomp/bpf-helper.h | 263
-rw-r--r--  samples/seccomp/dropper.c | 77
-rw-r--r--  samples/seccomp/user-trap.c | 375
-rw-r--r--  samples/timers/.gitignore | 2
-rw-r--r--  samples/timers/Makefile | 4
-rw-r--r--  samples/timers/hpet_example.c | 295
-rw-r--r--  samples/trace_events/Makefile | 17
-rw-r--r--  samples/trace_events/trace-events-sample.c | 153
-rw-r--r--  samples/trace_events/trace-events-sample.h | 608
-rw-r--r--  samples/trace_events/trace_custom_sched.c | 60
-rw-r--r--  samples/trace_events/trace_custom_sched.h | 96
-rw-r--r--  samples/trace_printk/Makefile | 7
-rw-r--r--  samples/trace_printk/trace-printk.c | 58
-rw-r--r--  samples/uhid/.gitignore | 2
-rw-r--r--  samples/uhid/Makefile | 4
-rw-r--r--  samples/uhid/uhid-example.c | 465
-rw-r--r--  samples/user_events/Makefile | 5
-rw-r--r--  samples/user_events/example.c | 102
-rw-r--r--  samples/v4l/Makefile | 2
-rw-r--r--  samples/v4l/v4l2-pci-skeleton.c | 903
-rw-r--r--  samples/vfio-mdev/Makefile | 5
-rw-r--r--  samples/vfio-mdev/mbochs.c | 1471
-rw-r--r--  samples/vfio-mdev/mdpy-defs.h | 22
-rw-r--r--  samples/vfio-mdev/mdpy-fb.c | 243
-rw-r--r--  samples/vfio-mdev/mdpy.c | 760
-rw-r--r--  samples/vfio-mdev/mtty.c | 1376
-rw-r--r--  samples/vfs/.gitignore | 3
-rw-r--r--  samples/vfs/Makefile | 4
-rw-r--r--  samples/vfs/test-fsmount.c | 129
-rw-r--r--  samples/vfs/test-statx.c | 265
-rw-r--r--  samples/watch_queue/.gitignore | 2
-rw-r--r--  samples/watch_queue/Makefile | 4
-rw-r--r--  samples/watch_queue/watch_test.c | 186
-rw-r--r--  samples/watchdog/.gitignore | 2
-rw-r--r--  samples/watchdog/Makefile | 2
-rw-r--r--  samples/watchdog/watchdog-simple.c | 25
276 files changed, 40646 insertions(+), 0 deletions(-)
diff --git a/samples/Kconfig b/samples/Kconfig
new file mode 100644
index 000000000..0d81c0028
--- /dev/null
+++ b/samples/Kconfig
@@ -0,0 +1,274 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig SAMPLES
+ bool "Sample kernel code"
+ help
+ You can build and test sample kernel code here.
+
+if SAMPLES
+
+config SAMPLE_AUXDISPLAY
+ bool "auxdisplay sample"
+ depends on CC_CAN_LINK
+
+config SAMPLE_TRACE_EVENTS
+ tristate "Build trace_events examples -- loadable modules only"
+ depends on EVENT_TRACING && m
+ help
+ This builds the trace event example module.
+
+config SAMPLE_TRACE_CUSTOM_EVENTS
+ tristate "Build custom trace event example -- loadable modules only"
+ depends on EVENT_TRACING && m
+ help
+ This builds the custom trace event example module.
+
+config SAMPLE_TRACE_PRINTK
+ tristate "Build trace_printk module - tests various trace_printk formats"
+ depends on EVENT_TRACING && m
+ help
+ This builds a module that calls trace_printk() and can be used to
+ test various trace_printk() calls from a module.
+
+config SAMPLE_FTRACE_DIRECT
+ tristate "Build register_ftrace_direct() example"
+ depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
+ depends on HAVE_SAMPLE_FTRACE_DIRECT
+ help
+ This builds an ftrace direct function example
+ that hooks to wake_up_process and prints the parameters.
+
+config SAMPLE_FTRACE_DIRECT_MULTI
+ tristate "Build register_ftrace_direct_multi() example"
+ depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
+ depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+ help
+ This builds an ftrace direct function example
+ that hooks to wake_up_process and schedule, and prints
+ the function addresses.
+
+config SAMPLE_TRACE_ARRAY
+ tristate "Build sample module for kernel access to Ftrace instancess"
+ depends on EVENT_TRACING && m
+ help
+ This builds a module that demonstrates the use of various APIs to
+ access Ftrace instances from within the kernel.
+
+config SAMPLE_KOBJECT
+ tristate "Build kobject examples"
+ help
+ This config option will allow you to build a number of
+ different kobject sample modules showing how to use kobjects,
+ ksets, and ktypes properly.
+
+ If in doubt, say "N" here.
+
+config SAMPLE_KPROBES
+ tristate "Build kprobes examples -- loadable modules only"
+ depends on KPROBES && m
+ help
+ This builds several kprobes example modules.
+
+config SAMPLE_KRETPROBES
+ tristate "Build kretprobes example -- loadable modules only"
+ default m
+ depends on SAMPLE_KPROBES && KRETPROBES
+
+config SAMPLE_HW_BREAKPOINT
+ tristate "Build kernel hardware breakpoint examples -- loadable module only"
+ depends on HAVE_HW_BREAKPOINT && m
+ help
+ This builds kernel hardware breakpoint example modules.
+
+config SAMPLE_FPROBE
+ tristate "Build fprobe examples -- loadable modules only"
+ depends on FPROBE && m
+ help
+ This builds an fprobe example module. The module has a 'symbol'
+ option: specify one probed symbol, or several separated by ','.
+
+config SAMPLE_KFIFO
+ tristate "Build kfifo examples -- loadable modules only"
+ depends on m
+ help
+ This config option will allow you to build a number of
+ different kfifo sample modules showing how to use the
+ generic kfifo API.
+
+ If in doubt, say "N" here.
+
+config SAMPLE_KDB
+ tristate "Build kdb command example -- loadable modules only"
+ depends on KGDB_KDB && m
+ help
+ Build an example of how to dynamically add the hello
+ command to the kdb shell.
+
+config SAMPLE_QMI_CLIENT
+ tristate "Build qmi client sample -- loadable modules only"
+ depends on m
+ depends on ARCH_QCOM
+ depends on NET
+ select QCOM_QMI_HELPERS
+ help
+ Build a QMI client sample driver, which demonstrates how to
+ communicate with a remote QRTR service, using QMI encoded messages.
+
+config SAMPLE_RPMSG_CLIENT
+ tristate "Build rpmsg client sample -- loadable modules only"
+ depends on RPMSG && m
+ help
+ Build an rpmsg client sample driver, which demonstrates how
+ to communicate with an AMP-configured remote processor over
+ the rpmsg bus.
+
+config SAMPLE_LIVEPATCH
+ tristate "Build live patching samples -- loadable modules only"
+ depends on LIVEPATCH && m
+ help
+ Build sample live patch demonstrations.
+
+config SAMPLE_CONFIGFS
+ tristate "Build configfs patching sample -- loadable modules only"
+ depends on CONFIGFS_FS && m
+ help
+ Builds a sample configfs interface.
+
+config SAMPLE_CONNECTOR
+ tristate "Build connector sample -- loadable modules only"
+ depends on CONNECTOR && HEADERS_INSTALL && m
+ help
+ When enabled, this builds both a sample kernel module for
+ the connector interface and a user space tool to communicate
+ with it.
+ See also Documentation/driver-api/connector.rst
+
+config SAMPLE_FANOTIFY_ERROR
+ bool "Build fanotify error monitoring sample"
+ depends on FANOTIFY && CC_CAN_LINK && HEADERS_INSTALL
+ help
+ When enabled, this builds example code that uses the
+ FAN_FS_ERROR fanotify mechanism to monitor filesystem
+ errors.
+ See also Documentation/admin-guide/filesystem-monitoring.rst.
+
+config SAMPLE_HIDRAW
+ bool "hidraw sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+
+config SAMPLE_LANDLOCK
+ bool "Landlock example"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build a simple Landlock sandbox manager able to start a process
+ restricted by a user-defined filesystem access control policy.
+
+config SAMPLE_PIDFD
+ bool "pidfd sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+
+config SAMPLE_SECCOMP
+ bool "Build seccomp sample code"
+ depends on SECCOMP_FILTER && CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build samples of seccomp filters using various methods of
+ BPF filter construction.
+
+config SAMPLE_TIMER
+ bool "Timer sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+
+config SAMPLE_UHID
+ bool "UHID sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build UHID sample program.
+
+config SAMPLE_VFIO_MDEV_MTTY
+ tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
+ depends on VFIO_MDEV && m
+ help
+ Build a virtual tty sample driver for use as a VFIO
+ mediated device.
+
+config SAMPLE_VFIO_MDEV_MDPY
+ tristate "Build VFIO mdpy example mediated device sample code -- loadable modules only"
+ depends on VFIO_MDEV && m
+ help
+ Build a virtual display sample driver for use as a VFIO
+ mediated device. It is a simple framebuffer and supports
+ the region display interface (VFIO_GFX_PLANE_TYPE_REGION).
+
+config SAMPLE_VFIO_MDEV_MDPY_FB
+ tristate "Build VFIO mdpy example guest fbdev driver -- loadable module only"
+ depends on FB && m
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Guest fbdev driver for the virtual display sample driver.
+
+config SAMPLE_VFIO_MDEV_MBOCHS
+ tristate "Build VFIO mdpy example mediated device sample code -- loadable modules only"
+ depends on VFIO_MDEV && m
+ select DMA_SHARED_BUFFER
+ help
+ Build a virtual display sample driver for use as a VFIO
+ mediated device. It supports the region display interface
+ (VFIO_GFX_PLANE_TYPE_DMABUF).
+ It emulates enough of the qemu stdvga to make bochs-drm.ko happy:
+ basically the vram memory BAR and the bochs dispi interface VBE
+ registers in the MMIO register BAR.
+ Specifically it does *not* include any legacy VGA support.
+ The device looks a lot like "qemu -device secondary-vga".
+
+config SAMPLE_ANDROID_BINDERFS
+ bool "Build Android binderfs example"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Builds a sample program to illustrate the use of the Android binderfs
+ filesystem.
+
+config SAMPLE_VFS
+ bool "Build example programs that use new VFS system calls"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build example userspace programs that use new VFS system calls such
+ as the mount API and statx(). Note that this is restricted to the x86
+ arch while it uses system calls that aren't yet available on all arches.
+
+config SAMPLE_INTEL_MEI
+ bool "Build example program working with intel mei driver"
+ depends on INTEL_MEI
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build a sample program to work with the Intel MEI device.
+
+config SAMPLE_WATCHDOG
+ bool "watchdog sample"
+ depends on CC_CAN_LINK
+
+config SAMPLE_WATCH_QUEUE
+ bool "Build example watch_queue notification API consumer"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build example userspace program to use the new mount_notify(),
+ sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function.
+
+config SAMPLE_CORESIGHT_SYSCFG
+ tristate "Build example loadable module for CoreSight config"
+ depends on CORESIGHT && m
+ help
+ Build an example loadable module that adds new CoreSight features
+ and configuration using the CoreSight system configuration API.
+ This demonstrates how a user may create their own CoreSight
+ configurations and easily load them into the system at runtime.
+
+source "samples/rust/Kconfig"
+
+endif # SAMPLES
+
+config HAVE_SAMPLE_FTRACE_DIRECT
+ bool
+
+config HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+ bool
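
A note on usage, not part of the Kconfig file itself: nothing above is built
until the corresponding symbols are enabled in the kernel configuration. A
minimal sketch, assuming fprobe support is available on the build arch (the
probed symbol kernel_clone is an illustrative choice)::

    # From the kernel top level: enable the samples menu plus one module
    # sample declared above, refresh the config, and build that directory.
    ./scripts/config -e SAMPLES -e FPROBE -m SAMPLE_FPROBE
    make olddefconfig
    make samples/fprobe/

    # Per the SAMPLE_FPROBE help text, the module takes a 'symbol' option
    # with one or more comma-separated symbols to probe.
    sudo insmod samples/fprobe/fprobe_example.ko symbol=kernel_clone
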
diff --git a/samples/Makefile b/samples/Makefile
new file mode 100644
index 000000000..9832ef3f8
--- /dev/null
+++ b/samples/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux samples code
+
+subdir-$(CONFIG_SAMPLE_AUXDISPLAY) += auxdisplay
+subdir-$(CONFIG_SAMPLE_ANDROID_BINDERFS) += binderfs
+obj-$(CONFIG_SAMPLE_CONFIGFS) += configfs/
+obj-$(CONFIG_SAMPLE_CONNECTOR) += connector/
+obj-$(CONFIG_SAMPLE_FANOTIFY_ERROR) += fanotify/
+subdir-$(CONFIG_SAMPLE_HIDRAW) += hidraw
+obj-$(CONFIG_SAMPLE_HW_BREAKPOINT) += hw_breakpoint/
+obj-$(CONFIG_SAMPLE_KDB) += kdb/
+obj-$(CONFIG_SAMPLE_KFIFO) += kfifo/
+obj-$(CONFIG_SAMPLE_KOBJECT) += kobject/
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobes/
+subdir-$(CONFIG_SAMPLE_LANDLOCK) += landlock
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch/
+subdir-$(CONFIG_SAMPLE_PIDFD) += pidfd
+obj-$(CONFIG_SAMPLE_QMI_CLIENT) += qmi/
+obj-$(CONFIG_SAMPLE_RPMSG_CLIENT) += rpmsg/
+subdir-$(CONFIG_SAMPLE_SECCOMP) += seccomp
+subdir-$(CONFIG_SAMPLE_TIMER) += timers
+obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/
+obj-$(CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS) += trace_events/
+obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace/
+obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
+subdir-$(CONFIG_SAMPLE_UHID) += uhid
+obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
+obj-y += vfio-mdev/
+subdir-$(CONFIG_SAMPLE_VFS) += vfs
+obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
+subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
+subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
+obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/
+obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
+obj-$(CONFIG_SAMPLES_RUST) += rust/
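
Two idioms are mixed above: ``obj-$(CONFIG_...) += dir/`` descends into a
directory through Kbuild to build kernel objects and modules, while
``subdir-$(CONFIG_...) += dir`` descends only to build userspace programs via
the userprogs machinery (see the auxdisplay and binderfs Makefiles below). A
hedged sketch of building one userspace sample, assuming its option is
enabled in the configuration::

    # Userspace samples compile against the exported UAPI headers:
    make headers_install
    # Build just the auxdisplay directory (a userprogs-always-y target):
    make samples/auxdisplay/
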
diff --git a/samples/acrn/Makefile b/samples/acrn/Makefile
new file mode 100644
index 000000000..c8e3ed978
--- /dev/null
+++ b/samples/acrn/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+.PHONY: vm-sample
+
+vm-sample: vm-sample.o payload.o
+ $(CC) $^ -o $@
+
+payload.o: payload.ld guest16.o
+ $(LD) -T $< -o $@
+
+clean:
+ rm -f *.o vm-sample
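
This Makefile is meant to be run standalone inside samples/acrn (guest16.o is
produced from the guest's 16-bit startup code by make's implicit rules). A
plausible build-and-run sequence on an ACRN Service VM, assuming
CONFIG_ACRN_HSM is enabled there, would be::

    cd samples/acrn
    make vm-sample     # links vm-sample.o with the payload-wrapped guest16.o
    sudo ./vm-sample   # root is needed to open /dev/acrn_hsm
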
diff --git a/samples/acrn/guest.ld b/samples/acrn/guest.ld
new file mode 100644
index 000000000..5127c682b
--- /dev/null
+++ b/samples/acrn/guest.ld
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+OUTPUT_FORMAT(binary)
+SECTIONS
+{
+ .start : { *(.start) }
+ .text : { *(.text*) }
+ .rodata : { *(.rodata) }
+ .data : { *(.data) }
+}
diff --git a/samples/acrn/payload.ld b/samples/acrn/payload.ld
new file mode 100644
index 000000000..e8d9a498a
--- /dev/null
+++ b/samples/acrn/payload.ld
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+SECTIONS
+{
+ .payload16 0 : {
+ guest16 = .;
+ guest16.o(.text)
+ guest16_end = .;
+ }
+}
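
The ``guest16`` and ``guest16_end`` symbols defined inside ``.payload16`` are
the glue between the linked payload and C code: vm-sample.c below declares
them as ``extern const unsigned char guest16[], guest16_end[]`` and copies the
bytes between them into guest memory. To confirm the wrapping worked, one
might inspect the object (a sketch, assuming payload.o has been built)::

    # Both delimiting symbols should be present in the wrapped object:
    nm payload.o | grep guest16
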
diff --git a/samples/acrn/vm-sample.c b/samples/acrn/vm-sample.c
new file mode 100644
index 000000000..b2dad47a7
--- /dev/null
+++ b/samples/acrn/vm-sample.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A sample program to run a User VM on the ACRN hypervisor
+ *
+ * This sample runs in a Service VM, which is a privileged VM of ACRN.
+ * CONFIG_ACRN_HSM needs to be enabled in the Service VM.
+ *
+ * Guest VM code in guest16.s will be executed after the VM is launched.
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/ioctl.h>
+#include <linux/acrn.h>
+
+#define GUEST_MEMORY_SIZE (1024*1024)
+void *guest_memory;
+
+extern const unsigned char guest16[], guest16_end[];
+static char io_request_page[4096] __attribute__((aligned(4096)));
+static struct acrn_io_request *io_req_buf = (struct acrn_io_request *)io_request_page;
+
+__u16 vcpu_num;
+__u16 vmid;
+/* POST_STANDARD_VM_UUID1, refer to https://github.com/projectacrn/acrn-hypervisor/blob/master/hypervisor/include/common/vm_uuids.h */
+guid_t vm_uuid = GUID_INIT(0x385479d2, 0xd625, 0xe811, 0x86, 0x4e, 0xcb, 0x7a, 0x18, 0xb3, 0x46, 0x43);
+
+int hsm_fd;
+int is_running = 1;
+
+void vm_exit(int sig)
+{
+ sig = sig; /* unused; self-assignment silences the compiler warning */
+
+ is_running = 0;
+ ioctl(hsm_fd, ACRN_IOCTL_PAUSE_VM, vmid);
+ ioctl(hsm_fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, 0);
+}
+
+int main(int argc, char **argv)
+{
+ int vcpu_id, ret;
+ struct acrn_vm_creation create_vm = {0};
+ struct acrn_vm_memmap ram_map = {0};
+ struct acrn_vcpu_regs regs;
+ struct acrn_io_request *io_req;
+ struct acrn_ioreq_notify __attribute__((aligned(8))) notify;
+
+ argc = argc; /* unused; self-assignments silence compiler warnings */
+ argv = argv;
+
+ guest_memory = memalign(4096, GUEST_MEMORY_SIZE);
+ if (!guest_memory) {
+ printf("No enough memory!\n");
+ return -1;
+ }
+ hsm_fd = open("/dev/acrn_hsm", O_RDWR|O_CLOEXEC);
+
+ memcpy(&create_vm.uuid, &vm_uuid, 16);
+ create_vm.ioreq_buf = (__u64)io_req_buf;
+ ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_VM, &create_vm);
+ printf("Created VM! [%d]\n", ret);
+ vcpu_num = create_vm.vcpu_num;
+ vmid = create_vm.vmid;
+
+ /* setup guest memory */
+ ram_map.type = ACRN_MEMMAP_RAM;
+ ram_map.vma_base = (__u64)guest_memory;
+ ram_map.len = GUEST_MEMORY_SIZE;
+ ram_map.user_vm_pa = 0;
+ ram_map.attr = ACRN_MEM_ACCESS_RWX;
+ ret = ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &ram_map);
+ printf("Set up VM memory! [%d]\n", ret);
+
+ memcpy(guest_memory, guest16, guest16_end-guest16);
+
+ /* setup vcpu registers */
+ memset(&regs, 0, sizeof(regs));
+ regs.vcpu_id = 0;
+ regs.vcpu_regs.rip = 0;
+
+ /* CR0_ET | CR0_NE */
+ regs.vcpu_regs.cr0 = 0x30U;
+ regs.vcpu_regs.cs_ar = 0x009FU;
+ regs.vcpu_regs.cs_sel = 0xF000U;
+ regs.vcpu_regs.cs_limit = 0xFFFFU;
+ regs.vcpu_regs.cs_base = 0 & 0xFFFF0000UL;
+ regs.vcpu_regs.rip = 0 & 0xFFFFUL;
+
+ ret = ioctl(hsm_fd, ACRN_IOCTL_SET_VCPU_REGS, &regs);
+ printf("Set up VM BSP registers! [%d]\n", ret);
+
+ /* create an ioreq client for this VM */
+ ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0);
+ printf("Created IO request client! [%d]\n", ret);
+
+ /* run vm */
+ ret = ioctl(hsm_fd, ACRN_IOCTL_START_VM, vmid);
+ printf("Start VM! [%d]\n", ret);
+
+ signal(SIGINT, vm_exit);
+ while (is_running) {
+ ret = ioctl(hsm_fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, 0);
+
+ for (vcpu_id = 0; vcpu_id < vcpu_num; vcpu_id++) {
+ io_req = &io_req_buf[vcpu_id];
+ if ((__sync_add_and_fetch(&io_req->processed, 0) == ACRN_IOREQ_STATE_PROCESSING)
+ && (!io_req->kernel_handled))
+ if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) {
+ int bytes, port, in;
+
+ port = io_req->reqs.pio_request.address;
+ bytes = io_req->reqs.pio_request.size;
+ in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);
+ printf("Guest VM %s PIO[%x] with size[%x]\n", in ? "read" : "write", port, bytes);
+
+ notify.vmid = vmid;
+ notify.vcpu = vcpu_id;
+ ioctl(hsm_fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify);
+ }
+ }
+ }
+
+ ret = ioctl(hsm_fd, ACRN_IOCTL_DESTROY_VM, NULL);
+ printf("Destroy VM! [%d]\n", ret);
+ close(hsm_fd);
+ free(guest_memory);
+ return 0;
+}
diff --git a/samples/auxdisplay/.gitignore b/samples/auxdisplay/.gitignore
new file mode 100644
index 000000000..d02381684
--- /dev/null
+++ b/samples/auxdisplay/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/cfag12864b-example
diff --git a/samples/auxdisplay/Makefile b/samples/auxdisplay/Makefile
new file mode 100644
index 000000000..19d556893
--- /dev/null
+++ b/samples/auxdisplay/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += cfag12864b-example
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
new file mode 100644
index 000000000..2e3bb7375
--- /dev/null
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Filename: cfag12864b-example.c
+ * Version: 0.1.0
+ * Description: cfag12864b LCD userspace example program
+ *
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
+ * Date: 2006-10-31
+ */
+
+/*
+ * ------------------------
+ * start of cfag12864b code
+ * ------------------------
+ */
+
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#define CFAG12864B_WIDTH (128)
+#define CFAG12864B_HEIGHT (64)
+#define CFAG12864B_SIZE (128 * 64 / 8)
+#define CFAG12864B_BPB (8)
+#define CFAG12864B_ADDRESS(x, y) ((y) * CFAG12864B_WIDTH / \
+ CFAG12864B_BPB + (x) / CFAG12864B_BPB)
+#define CFAG12864B_BIT(n) (((unsigned char) 1) << (n))
+
+#undef CFAG12864B_DOCHECK
+#ifdef CFAG12864B_DOCHECK
+ #define CFAG12864B_CHECK(x, y) ((x) < CFAG12864B_WIDTH && \
+ (y) < CFAG12864B_HEIGHT)
+#else
+ #define CFAG12864B_CHECK(x, y) (1)
+#endif
+
+int cfag12864b_fd;
+unsigned char *cfag12864b_mem;
+unsigned char cfag12864b_buffer[CFAG12864B_SIZE];
+
+/*
+ * init a cfag12864b framebuffer device
+ *
+ * No error: return = 0
+ * Unable to open: return = -1
+ * Unable to mmap: return = -2
+ */
+static int cfag12864b_init(char *path)
+{
+ cfag12864b_fd = open(path, O_RDWR);
+ if (cfag12864b_fd == -1)
+ return -1;
+
+ cfag12864b_mem = mmap(0, CFAG12864B_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, cfag12864b_fd, 0);
+ if (cfag12864b_mem == MAP_FAILED) {
+ close(cfag12864b_fd);
+ return -2;
+ }
+
+ return 0;
+}
+
+/*
+ * exit a cfag12864b framebuffer device
+ */
+static void cfag12864b_exit(void)
+{
+ munmap(cfag12864b_mem, CFAG12864B_SIZE);
+ close(cfag12864b_fd);
+}
+
+/*
+ * set (x, y) pixel
+ */
+static void cfag12864b_set(unsigned char x, unsigned char y)
+{
+ if (CFAG12864B_CHECK(x, y))
+ cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] |=
+ CFAG12864B_BIT(x % CFAG12864B_BPB);
+}
+
+/*
+ * unset (x, y) pixel
+ */
+static void cfag12864b_unset(unsigned char x, unsigned char y)
+{
+ if (CFAG12864B_CHECK(x, y))
+ cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &=
+ ~CFAG12864B_BIT(x % CFAG12864B_BPB);
+}
+
+/*
+ * is set (x, y) pixel?
+ *
+ * Pixel off: return = 0
+ * Pixel on: return = 1
+ */
+static unsigned char cfag12864b_isset(unsigned char x, unsigned char y)
+{
+ if (CFAG12864B_CHECK(x, y))
+ if (cfag12864b_buffer[CFAG12864B_ADDRESS(x, y)] &
+ CFAG12864B_BIT(x % CFAG12864B_BPB))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * not (x, y) pixel
+ */
+static void cfag12864b_not(unsigned char x, unsigned char y)
+{
+ if (cfag12864b_isset(x, y))
+ cfag12864b_unset(x, y);
+ else
+ cfag12864b_set(x, y);
+}
+
+/*
+ * fill (set all pixels)
+ */
+static void cfag12864b_fill(void)
+{
+ unsigned short i;
+
+ for (i = 0; i < CFAG12864B_SIZE; i++)
+ cfag12864b_buffer[i] = 0xFF;
+}
+
+/*
+ * clear (unset all pixels)
+ */
+static void cfag12864b_clear(void)
+{
+ unsigned short i;
+
+ for (i = 0; i < CFAG12864B_SIZE; i++)
+ cfag12864b_buffer[i] = 0;
+}
+
+/*
+ * format a [128*64] matrix
+ *
+ * Pixel off: src[i] = 0
+ * Pixel on: src[i] > 0
+ */
+static void cfag12864b_format(unsigned char *matrix)
+{
+ unsigned char i, j, n;
+
+ for (i = 0; i < CFAG12864B_HEIGHT; i++)
+ for (j = 0; j < CFAG12864B_WIDTH / CFAG12864B_BPB; j++) {
+ cfag12864b_buffer[i * CFAG12864B_WIDTH / CFAG12864B_BPB +
+ j] = 0;
+ for (n = 0; n < CFAG12864B_BPB; n++)
+ if (matrix[i * CFAG12864B_WIDTH +
+ j * CFAG12864B_BPB + n])
+ cfag12864b_buffer[i * CFAG12864B_WIDTH /
+ CFAG12864B_BPB + j] |=
+ CFAG12864B_BIT(n);
+ }
+}
+
+/*
+ * blit buffer to lcd
+ */
+static void cfag12864b_blit(void)
+{
+ memcpy(cfag12864b_mem, cfag12864b_buffer, CFAG12864B_SIZE);
+}
+
+/*
+ * ----------------------
+ * end of cfag12864b code
+ * ----------------------
+ */
+
+#include <stdio.h>
+
+#define EXAMPLES 6
+
+static void example(unsigned char n)
+{
+ unsigned short i, j;
+ unsigned char matrix[CFAG12864B_WIDTH * CFAG12864B_HEIGHT];
+
+ if (n > EXAMPLES)
+ return;
+
+ printf("Example %i/%i - ", n, EXAMPLES);
+
+ switch (n) {
+ case 1:
+ printf("Draw points setting bits");
+ cfag12864b_clear();
+ for (i = 0; i < CFAG12864B_WIDTH; i += 2)
+ for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
+ cfag12864b_set(i, j);
+ break;
+
+ case 2:
+ printf("Clear the LCD");
+ cfag12864b_clear();
+ break;
+
+ case 3:
+ printf("Draw rows formatting a [128*64] matrix");
+ memset(matrix, 0, CFAG12864B_WIDTH * CFAG12864B_HEIGHT);
+ for (i = 0; i < CFAG12864B_WIDTH; i++)
+ for (j = 0; j < CFAG12864B_HEIGHT; j += 2)
+ matrix[j * CFAG12864B_WIDTH + i] = 1;
+ cfag12864b_format(matrix);
+ break;
+
+ case 4:
+ printf("Fill the lcd");
+ cfag12864b_fill();
+ break;
+
+ case 5:
+ printf("Draw columns unsetting bits");
+ for (i = 0; i < CFAG12864B_WIDTH; i += 2)
+ for (j = 0; j < CFAG12864B_HEIGHT; j++)
+ cfag12864b_unset(i, j);
+ break;
+
+ case 6:
+ printf("Do negative not-ing all bits");
+ for (i = 0; i < CFAG12864B_WIDTH; i++)
+ for (j = 0; j < CFAG12864B_HEIGHT; j ++)
+ cfag12864b_not(i, j);
+ break;
+ }
+
+ puts(" - [Press Enter]");
+}
+
+int main(int argc, char *argv[])
+{
+ unsigned char n;
+
+ if (argc != 2) {
+ printf(
+ "Syntax: %s fbdev\n"
+ "Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
+ return -1;
+ }
+
+ if (cfag12864b_init(argv[1])) {
+ printf("Can't init %s fbdev\n", argv[1]);
+ return -2;
+ }
+
+ for (n = 1; n <= EXAMPLES; n++) {
+ example(n);
+ cfag12864b_blit();
+ while (getchar() != '\n');
+ }
+
+ cfag12864b_exit();
+
+ return 0;
+}
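
As the argument check in main() shows, the program takes a framebuffer device
node as its only argument and steps through the six demos, blitting to the LCD
after each Enter key press. A usage sketch, assuming the cfag12864b driver has
registered a framebuffer (the fb number varies by system)::

    ./cfag12864b-example /dev/fb1
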
diff --git a/samples/binderfs/.gitignore b/samples/binderfs/.gitignore
new file mode 100644
index 000000000..8fa415a36
--- /dev/null
+++ b/samples/binderfs/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+/binderfs_example
diff --git a/samples/binderfs/Makefile b/samples/binderfs/Makefile
new file mode 100644
index 000000000..629e43b9b
--- /dev/null
+++ b/samples/binderfs/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs-always-y += binderfs_example
+
+userccflags += -I usr/include
diff --git a/samples/binderfs/binderfs_example.c b/samples/binderfs/binderfs_example.c
new file mode 100644
index 000000000..0fd92cdda
--- /dev/null
+++ b/samples/binderfs/binderfs_example.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/android/binder.h>
+#include <linux/android/binderfs.h>
+
+int main(int argc, char *argv[])
+{
+ int fd, ret, saved_errno;
+ struct binderfs_device device = { 0 };
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to unshare mount namespace\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to mount / as private\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mkdir("/dev/binderfs", 0755);
+ if (ret < 0 && errno != EEXIST) {
+ fprintf(stderr, "%s - Failed to create binderfs mountpoint\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to mount binderfs\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ memcpy(device.name, "my-binder", strlen("my-binder"));
+
+ fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ fprintf(stderr, "%s - Failed to open binder-control device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ ret = ioctl(fd, BINDER_CTL_ADD, &device);
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to allocate new binder device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Allocated new binder device with major %d, minor %d, and name %s\n",
+ device.major, device.minor, device.name);
+
+ ret = unlink("/dev/binderfs/my-binder");
+ if (ret < 0) {
+ fprintf(stderr, "%s - Failed to delete binder device\n",
+ strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+
+ /* Cleanup happens when the mount namespace dies. */
+ exit(EXIT_SUCCESS);
+}
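
Since the example unshares a mount namespace, remounts / as private, and
mounts binderfs, it must run with CAP_SYS_ADMIN; on success it prints the
major, minor, and name of the freshly allocated binder device. A hedged run
sketch, assuming a kernel with binderfs support::

    make headers_install    # the Makefile adds -I usr/include
    make samples/binderfs/
    sudo samples/binderfs/binderfs_example
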
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644
index 000000000..0e7bfdbff
--- /dev/null
+++ b/samples/bpf/.gitignore
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+cpustat
+fds_example
+hbm
+ibumad
+lathist
+lwt_len_hist
+map_perf_test
+offwaketime
+per_socket_stats_example
+sampleip
+sock_example
+sockex1
+sockex2
+sockex3
+spintest
+syscall_nrs.h
+syscall_tp
+task_fd_query
+tc_l2_redirect
+test_cgrp2_array_pin
+test_cgrp2_attach
+test_cgrp2_attach2
+test_cgrp2_sock
+test_cgrp2_sock2
+test_current_task_under_cgroup
+test_lru_dist
+test_map_in_map
+test_overhead
+test_probe_write_user
+trace_event
+trace_output
+tracex1
+tracex2
+tracex3
+tracex4
+tracex5
+tracex6
+tracex7
+xdp1
+xdp2
+xdp_adjust_tail
+xdp_fwd
+xdp_monitor
+xdp_redirect
+xdp_redirect_cpu
+xdp_redirect_map
+xdp_redirect_map_multi
+xdp_router_ipv4
+xdp_rxq_info
+xdp_sample_pkts
+xdp_tx_iptunnel
+xdpsock
+xdpsock_ctrl_proc
+xsk_fwd
+testfile.img
+hbm_out.log
+iperf.*
+*.out
+*.skel.h
+/vmlinux.h
+/bpftool/
+/libbpf/
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
new file mode 100644
index 000000000..727da3c58
--- /dev/null
+++ b/samples/bpf/Makefile
@@ -0,0 +1,450 @@
+# SPDX-License-Identifier: GPL-2.0
+
+BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
+TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools
+
+pound := \#
+
+# List of programs to build
+tprogs-y := test_lru_dist
+tprogs-y += sock_example
+tprogs-y += fds_example
+tprogs-y += sockex1
+tprogs-y += sockex2
+tprogs-y += sockex3
+tprogs-y += tracex1
+tprogs-y += tracex2
+tprogs-y += tracex3
+tprogs-y += tracex4
+tprogs-y += tracex5
+tprogs-y += tracex6
+tprogs-y += tracex7
+tprogs-y += test_probe_write_user
+tprogs-y += trace_output
+tprogs-y += lathist
+tprogs-y += offwaketime
+tprogs-y += spintest
+tprogs-y += map_perf_test
+tprogs-y += test_overhead
+tprogs-y += test_cgrp2_array_pin
+tprogs-y += test_cgrp2_attach
+tprogs-y += test_cgrp2_sock
+tprogs-y += test_cgrp2_sock2
+tprogs-y += xdp1
+tprogs-y += xdp2
+tprogs-y += xdp_router_ipv4
+tprogs-y += test_current_task_under_cgroup
+tprogs-y += trace_event
+tprogs-y += sampleip
+tprogs-y += tc_l2_redirect
+tprogs-y += lwt_len_hist
+tprogs-y += xdp_tx_iptunnel
+tprogs-y += test_map_in_map
+tprogs-y += per_socket_stats_example
+tprogs-y += xdp_rxq_info
+tprogs-y += syscall_tp
+tprogs-y += cpustat
+tprogs-y += xdp_adjust_tail
+tprogs-y += xdp_fwd
+tprogs-y += task_fd_query
+tprogs-y += xdp_sample_pkts
+tprogs-y += ibumad
+tprogs-y += hbm
+
+tprogs-y += xdp_redirect_cpu
+tprogs-y += xdp_redirect_map_multi
+tprogs-y += xdp_redirect_map
+tprogs-y += xdp_redirect
+tprogs-y += xdp_monitor
+
+# Libbpf dependencies
+LIBBPF_SRC = $(TOOLS_PATH)/lib/bpf
+LIBBPF_OUTPUT = $(abspath $(BPF_SAMPLES_PATH))/libbpf
+LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
+LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
+LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
+
+CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
+TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
+XDP_SAMPLE := xdp_sample_user.o
+
+fds_example-objs := fds_example.o
+sockex1-objs := sockex1_user.o
+sockex2-objs := sockex2_user.o
+sockex3-objs := sockex3_user.o
+tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
+tracex2-objs := tracex2_user.o
+tracex3-objs := tracex3_user.o
+tracex4-objs := tracex4_user.o
+tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
+tracex6-objs := tracex6_user.o
+tracex7-objs := tracex7_user.o
+test_probe_write_user-objs := test_probe_write_user_user.o
+trace_output-objs := trace_output_user.o
+lathist-objs := lathist_user.o
+offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
+spintest-objs := spintest_user.o $(TRACE_HELPERS)
+map_perf_test-objs := map_perf_test_user.o
+test_overhead-objs := test_overhead_user.o
+test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
+test_cgrp2_attach-objs := test_cgrp2_attach.o
+test_cgrp2_sock-objs := test_cgrp2_sock.o
+test_cgrp2_sock2-objs := test_cgrp2_sock2.o
+xdp1-objs := xdp1_user.o
+# reuse xdp1 source intentionally
+xdp2-objs := xdp1_user.o
+test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
+ test_current_task_under_cgroup_user.o
+trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
+sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
+tc_l2_redirect-objs := tc_l2_redirect_user.o
+lwt_len_hist-objs := lwt_len_hist_user.o
+xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
+test_map_in_map-objs := test_map_in_map_user.o
+per_socket_stats_example-objs := cookie_uid_helper_example.o
+xdp_rxq_info-objs := xdp_rxq_info_user.o
+syscall_tp-objs := syscall_tp_user.o
+cpustat-objs := cpustat_user.o
+xdp_adjust_tail-objs := xdp_adjust_tail_user.o
+xdp_fwd-objs := xdp_fwd_user.o
+task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o
+ibumad-objs := ibumad_user.o
+hbm-objs := hbm.o $(CGROUP_HELPERS)
+
+xdp_redirect_map_multi-objs := xdp_redirect_map_multi_user.o $(XDP_SAMPLE)
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o $(XDP_SAMPLE)
+xdp_redirect_map-objs := xdp_redirect_map_user.o $(XDP_SAMPLE)
+xdp_redirect-objs := xdp_redirect_user.o $(XDP_SAMPLE)
+xdp_monitor-objs := xdp_monitor_user.o $(XDP_SAMPLE)
+xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE)
+
+# Tell kbuild to always build the programs
+always-y := $(tprogs-y)
+always-y += sockex1_kern.o
+always-y += sockex2_kern.o
+always-y += sockex3_kern.o
+always-y += tracex1_kern.o
+always-y += tracex2_kern.o
+always-y += tracex3_kern.o
+always-y += tracex4_kern.o
+always-y += tracex5_kern.o
+always-y += tracex6_kern.o
+always-y += tracex7_kern.o
+always-y += sock_flags_kern.o
+always-y += test_probe_write_user_kern.o
+always-y += trace_output_kern.o
+always-y += tcbpf1_kern.o
+always-y += tc_l2_redirect_kern.o
+always-y += lathist_kern.o
+always-y += offwaketime_kern.o
+always-y += spintest_kern.o
+always-y += map_perf_test_kern.o
+always-y += test_overhead_tp_kern.o
+always-y += test_overhead_raw_tp_kern.o
+always-y += test_overhead_kprobe_kern.o
+always-y += parse_varlen.o parse_simple.o parse_ldabs.o
+always-y += test_cgrp2_tc_kern.o
+always-y += xdp1_kern.o
+always-y += xdp2_kern.o
+always-y += test_current_task_under_cgroup_kern.o
+always-y += trace_event_kern.o
+always-y += sampleip_kern.o
+always-y += lwt_len_hist_kern.o
+always-y += xdp_tx_iptunnel_kern.o
+always-y += test_map_in_map_kern.o
+always-y += tcp_synrto_kern.o
+always-y += tcp_rwnd_kern.o
+always-y += tcp_bufs_kern.o
+always-y += tcp_cong_kern.o
+always-y += tcp_iw_kern.o
+always-y += tcp_clamp_kern.o
+always-y += tcp_basertt_kern.o
+always-y += tcp_tos_reflect_kern.o
+always-y += tcp_dumpstats_kern.o
+always-y += xdp_rxq_info_kern.o
+always-y += xdp2skb_meta_kern.o
+always-y += syscall_tp_kern.o
+always-y += cpustat_kern.o
+always-y += xdp_adjust_tail_kern.o
+always-y += xdp_fwd_kern.o
+always-y += task_fd_query_kern.o
+always-y += xdp_sample_pkts_kern.o
+always-y += ibumad_kern.o
+always-y += hbm_out_kern.o
+always-y += hbm_edt_kern.o
+
+ifeq ($(ARCH), arm)
+# Strip all flags except the -D__LINUX_ARM_ARCH__ option, which is needed to
+# handle Linux headers when ARM instruction set identification is requested.
+ARM_ARCH_SELECTOR := $(filter -D__LINUX_ARM_ARCH__%, $(KBUILD_CFLAGS))
+BPF_EXTRA_CFLAGS := $(ARM_ARCH_SELECTOR)
+TPROGS_CFLAGS += $(ARM_ARCH_SELECTOR)
+endif
+
+ifeq ($(ARCH), mips)
+TPROGS_CFLAGS += -D__SANE_USERSPACE_TYPES__
+ifdef CONFIG_MACH_LOONGSON64
+BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-loongson64
+BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic
+endif
+endif
+
+TPROGS_CFLAGS += -Wall -O2
+TPROGS_CFLAGS += -Wmissing-prototypes
+TPROGS_CFLAGS += -Wstrict-prototypes
+
+TPROGS_CFLAGS += -I$(objtree)/usr/include
+TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
+TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
+TPROGS_CFLAGS += -I$(srctree)/tools/include
+TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
+
+ifdef SYSROOT
+TPROGS_CFLAGS += --sysroot=$(SYSROOT)
+TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
+endif
+
+TPROGS_LDLIBS += $(LIBBPF) -lelf -lz
+TPROGLDLIBS_xdp_monitor += -lm
+TPROGLDLIBS_xdp_redirect += -lm
+TPROGLDLIBS_xdp_redirect_cpu += -lm
+TPROGLDLIBS_xdp_redirect_map += -lm
+TPROGLDLIBS_xdp_redirect_map_multi += -lm
+TPROGLDLIBS_xdp_router_ipv4 += -lm -pthread
+TPROGLDLIBS_tracex4 += -lrt
+TPROGLDLIBS_trace_output += -lrt
+TPROGLDLIBS_map_perf_test += -lrt
+TPROGLDLIBS_test_overhead += -lrt
+
+# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
+# make M=samples/bpf LLC=~/git/llvm-project/llvm/build/bin/llc CLANG=~/git/llvm-project/llvm/build/bin/clang
+LLC ?= llc
+CLANG ?= clang
+OPT ?= opt
+LLVM_DIS ?= llvm-dis
+LLVM_OBJCOPY ?= llvm-objcopy
+LLVM_READELF ?= llvm-readelf
+BTF_PAHOLE ?= pahole
+
+# Detect that we're cross compiling and use the cross compiler
+ifdef CROSS_COMPILE
+CLANG_ARCH_ARGS = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+# Don't evaluate probes and warnings if we need to run make recursively
+ifneq ($(src),)
+HDR_PROBE := $(shell printf "$(pound)include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
+ $(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
+ -o /dev/null 2>/dev/null && echo okay)
+
+ifeq ($(HDR_PROBE),)
+$(warning WARNING: Detected possible issues with include path.)
+$(warning WARNING: Please install kernel headers locally (make headers_install).)
+endif
+
+BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
+BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
+BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
+ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
+ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
+ /bin/rm -f ./llvm_btf_verify.o)
+
+BPF_EXTRA_CFLAGS += -fno-stack-protector
+ifneq ($(BTF_LLVM_PROBE),)
+ BPF_EXTRA_CFLAGS += -g
+else
+ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
+ BPF_EXTRA_CFLAGS += -g
+ LLC_FLAGS += -mattr=dwarfris
+ DWARF2BTF = y
+endif
+endif
+endif
+
+# Trick to allow make to be run from this directory
+all:
+ $(MAKE) -C ../../ M=$(CURDIR) BPF_SAMPLES_PATH=$(CURDIR)
+
+clean:
+ $(MAKE) -C ../../ M=$(CURDIR) clean
+ @find $(CURDIR) -type f -name '*~' -delete
+ @$(RM) -r $(CURDIR)/libbpf $(CURDIR)/bpftool
+
+$(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
+# Fix up variables inherited from Kbuild that tools/ build system won't like
+ $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
+ LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ \
+ O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \
+ $@ install_headers
+
+BPFTOOLDIR := $(TOOLS_PATH)/bpf/bpftool
+BPFTOOL_OUTPUT := $(abspath $(BPF_SAMPLES_PATH))/bpftool
+BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool
+$(BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT)
+ $(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \
+ OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap
+
+$(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
+ $(call msg,MKDIR,$@)
+ $(Q)mkdir -p $@
+
+$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
+ $(call filechk,offsets,__SYSCALL_NRS_H__)
+
+targets += syscall_nrs.s
+clean-files += syscall_nrs.h
+
+FORCE:
+
+
+# Verify LLVM compiler tools are available and bpf target is supported by llc
+.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
+
+verify_cmds: $(CLANG) $(LLC)
+ @for TOOL in $^ ; do \
+ if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
+ echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
+ exit 1; \
+ else true; fi; \
+ done
+
+verify_target_bpf: verify_cmds
+ @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
+ echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
+ echo " NOTICE: LLVM version >= 3.7.1 required" ;\
+ exit 2; \
+ else true; fi
+
+$(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF)
+$(src)/*.c: verify_target_bpf $(LIBBPF)
+
+libbpf_hdrs: $(LIBBPF)
+$(obj)/$(TRACE_HELPERS) $(obj)/$(CGROUP_HELPERS) $(obj)/$(XDP_SAMPLE): | libbpf_hdrs
+
+.PHONY: libbpf_hdrs
+
+$(obj)/xdp_redirect_cpu_user.o: $(obj)/xdp_redirect_cpu.skel.h
+$(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h
+$(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h
+$(obj)/xdp_redirect_user.o: $(obj)/xdp_redirect.skel.h
+$(obj)/xdp_monitor_user.o: $(obj)/xdp_monitor.skel.h
+$(obj)/xdp_router_ipv4_user.o: $(obj)/xdp_router_ipv4.skel.h
+
+$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
+$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+$(obj)/hbm.o: $(src)/hbm.h
+$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+
+# Override includes for xdp_sample_user.o because $(srctree)/usr/include in
+# TPROGS_CFLAGS causes conflicts
+XDP_SAMPLE_CFLAGS += -Wall -O2 \
+ -I$(src)/../../tools/include \
+ -I$(src)/../../tools/include/uapi \
+ -I$(LIBBPF_INCLUDE) \
+ -I$(src)/../../tools/testing/selftests/bpf
+
+$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
+$(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
+
+-include $(BPF_SAMPLES_PATH)/Makefile.target
+
+VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux)) \
+ $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \
+ $(abspath ./vmlinux)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
+$(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
+ifeq ($(VMLINUX_BTF),)
+ $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)",\
+ build the kernel or set VMLINUX_BTF or VMLINUX_H variable)
+endif
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+clean-files += vmlinux.h
+
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) -dM -E - </dev/null | grep '#define __riscv_xlen ' | sed 's/#define /-D/' | sed 's/ /=/')
+endef
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
+
+$(obj)/xdp_redirect_cpu.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_redirect_map_multi.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_redirect_map.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_redirect.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_monitor.bpf.o: $(obj)/xdp_sample.bpf.o
+$(obj)/xdp_router_ipv4.bpf.o: $(obj)/xdp_sample.bpf.o
+
+$(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/xdp_sample_shared.h
+ @echo " CLANG-BPF " $@
+ $(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(SRCARCH) \
+ -Wno-compare-distinct-pointer-types -I$(srctree)/include \
+ -I$(srctree)/samples/bpf -I$(srctree)/tools/include \
+ -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \
+ -c $(filter %.bpf.c,$^) -o $@
+
+LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \
+ xdp_redirect_map.skel.h xdp_redirect.skel.h xdp_monitor.skel.h \
+ xdp_router_ipv4.skel.h
+clean-files += $(LINKED_SKELS)
+
+xdp_redirect_cpu.skel.h-deps := xdp_redirect_cpu.bpf.o xdp_sample.bpf.o
+xdp_redirect_map_multi.skel.h-deps := xdp_redirect_map_multi.bpf.o xdp_sample.bpf.o
+xdp_redirect_map.skel.h-deps := xdp_redirect_map.bpf.o xdp_sample.bpf.o
+xdp_redirect.skel.h-deps := xdp_redirect.bpf.o xdp_sample.bpf.o
+xdp_monitor.skel.h-deps := xdp_monitor.bpf.o xdp_sample.bpf.o
+xdp_router_ipv4.skel.h-deps := xdp_router_ipv4.bpf.o xdp_sample.bpf.o
+
+LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
+
+BPF_SRCS_LINKED := $(notdir $(wildcard $(src)/*.bpf.c))
+BPF_OBJS_LINKED := $(patsubst %.bpf.c,$(obj)/%.bpf.o, $(BPF_SRCS_LINKED))
+BPF_SKELS_LINKED := $(addprefix $(obj)/,$(LINKED_SKELS))
+
+$(BPF_SKELS_LINKED): $(BPF_OBJS_LINKED) $(BPFTOOL)
+ @echo " BPF GEN-OBJ " $(@:.skel.h=)
+ $(Q)$(BPFTOOL) gen object $(@:.skel.h=.lbpf.o) $(addprefix $(obj)/,$($(@F)-deps))
+ @echo " BPF GEN-SKEL" $(@:.skel.h=)
+ $(Q)$(BPFTOOL) gen skeleton $(@:.skel.h=.lbpf.o) name $(notdir $(@:.skel.h=)) > $@
+
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
+# useless for BPF samples.
+# below we use long chain of commands, clang | opt | llvm-dis | llc,
+# to generate final object file. 'clang' compiles the source into IR
+# with native target, e.g., x64, arm64, etc. 'opt' does bpf CORE IR builtin
+# processing (llvm12) and IR optimizations. 'llvm-dis' converts
+# 'opt' output to IR, and finally 'llc' generates bpf byte code.
+$(obj)/%.o: $(src)/%.c
+ @echo " CLANG-bpf " $@
+ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
+ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
+ -I$(LIBBPF_INCLUDE) \
+ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
+ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
+ -Wno-gnu-variable-sized-type-not-at-end \
+ -Wno-address-of-packed-member -Wno-tautological-compare \
+ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -fno-asynchronous-unwind-tables \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
+ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
+ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
+ $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
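
The comment above the final rule summarizes the clang | opt | llvm-dis | llc
pipeline kbuild runs for each *_kern.c file. Spelled out by hand for a single
file, with the many include flags trimmed for readability, it is roughly this
(a sketch, not the exact kbuild command line)::

    clang -O2 -emit-llvm -Xclang -disable-llvm-passes -c tracex1_kern.c -o - | \
        opt -O2 -mtriple=bpf-pc-linux | llvm-dis | \
        llc -march=bpf -filetype=obj -o tracex1_kern.o
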
diff --git a/samples/bpf/Makefile.target b/samples/bpf/Makefile.target
new file mode 100644
index 000000000..7621f55e2
--- /dev/null
+++ b/samples/bpf/Makefile.target
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+# ==========================================================================
+# Building binaries on the host system
+# Binaries are not used during the compilation of the kernel; they are
+# intended to be built for a target board (which can, of course, be the
+# host). This is used to build binaries that need not run on the host.
+#
+# Sample syntax
+# tprogs-y := xsk_example
+# Will compile xsk_example.c and create an executable named xsk_example
+#
+# tprogs-y := xdpsock
+# xdpsock-objs := xdpsock_1.o xdpsock_2.o
+# Will compile xdpsock_1.c and xdpsock_2.c, and then link the executable
+# xdpsock, based on xdpsock_1.o and xdpsock_2.o
+#
+# Derived from scripts/Makefile.host
+#
+__tprogs := $(sort $(tprogs-y))
+
+# C code
+# Executables compiled from a single .c file
+tprog-csingle := $(foreach m,$(__tprogs), \
+ $(if $($(m)-objs),,$(m)))
+
+# C executables linked based on several .o files
+tprog-cmulti := $(foreach m,$(__tprogs),\
+ $(if $($(m)-objs),$(m)))
+
+# Object (.o) files compiled from .c files
+tprog-cobjs := $(sort $(foreach m,$(__tprogs),$($(m)-objs)))
+
+tprog-csingle := $(addprefix $(obj)/,$(tprog-csingle))
+tprog-cmulti := $(addprefix $(obj)/,$(tprog-cmulti))
+tprog-cobjs := $(addprefix $(obj)/,$(tprog-cobjs))
+
+#####
+# Handle options to gcc. Support building with separate output directory
+
+_tprogc_flags = $(TPROGS_CFLAGS) \
+ $(TPROGCFLAGS_$(basetarget).o)
+
+# $(objtree)/$(obj) for including generated headers from checkin source files
+ifeq ($(KBUILD_EXTMOD),)
+ifdef building_out_of_srctree
+_tprogc_flags += -I $(objtree)/$(obj)
+endif
+endif
+
+tprogc_flags = -Wp,-MD,$(depfile) $(_tprogc_flags)
+
+# Create executable from a single .c file
+# tprog-csingle -> Executable
+quiet_cmd_tprog-csingle = CC $@
+ cmd_tprog-csingle = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ $< \
+ $(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
+$(tprog-csingle): $(obj)/%: $(src)/%.c FORCE
+ $(call if_changed_dep,tprog-csingle)
+
+# Link an executable based on list of .o files, all plain c
+# tprog-cmulti -> executable
+quiet_cmd_tprog-cmulti = LD $@
+ cmd_tprog-cmulti = $(CC) $(tprogc_flags) $(TPROGS_LDFLAGS) -o $@ \
+ $(addprefix $(obj)/,$($(@F)-objs)) \
+ $(TPROGS_LDLIBS) $(TPROGLDLIBS_$(@F))
+$(tprog-cmulti): $(tprog-cobjs) FORCE
+ $(call if_changed,tprog-cmulti)
+$(call multi_depend, $(tprog-cmulti), , -objs)
+
+# Create .o file from a single .c file
+# tprog-cobjs -> .o
+quiet_cmd_tprog-cobjs = CC $@
+ cmd_tprog-cobjs = $(CC) $(tprogc_flags) -c -o $@ $<
+$(tprog-cobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,tprog-cobjs)
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
new file mode 100644
index 000000000..60c6494ad
--- /dev/null
+++ b/samples/bpf/README.rst
@@ -0,0 +1,111 @@
+eBPF sample programs
+====================
+
+This directory contains test stubs, a verifier test-suite and examples
+for using eBPF. The examples use libbpf from tools/lib/bpf.
+
+Build dependencies
+==================
+
+Compiling requires having installed:
+ * clang >= version 3.4.0
+ * llvm >= version 3.7.1
+
+Note that LLVM's tool 'llc' must support the 'bpf' target; list the version
+and supported targets with the command: ``llc --version``
+
+Clean and configuration
+-----------------------
+
+It may be necessary to clean tools, samples or the kernel before trying a
+new arch or after some changes (on demand)::
+
+ make -C tools clean
+ make -C samples/bpf clean
+ make clean
+
+Configure the kernel, with defconfig for instance::
+
+ make defconfig
+
+Kernel headers
+--------------
+
+There are usually dependencies on header files of the current kernel.
+To avoid installing devel kernel headers system wide, as a normal
+user, simply call::
+
+ make headers_install
+
+This creates a local "usr/include" directory in the git/build top
+level directory that the make system automatically picks up first.
+
+Compiling
+=========
+
+For building the BPF samples, issue the below command from the kernel
+top level directory::
+
+ make M=samples/bpf
+
+It is also possible to call make from this directory. This simply
+hides the invocation of make shown above.
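+
+For example::
+
+    cd samples/bpf
+    make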
+
+Manually compiling LLVM with 'bpf' support
+------------------------------------------
+
+Since version 3.7.0, LLVM has included a proper backend target for the
+BPF bytecode architecture.
+
+By default llvm will build all non-experimental backends including bpf.
+To generate a smaller llc binary one can use::
+
+ -DLLVM_TARGETS_TO_BUILD="BPF"
+
+We recommend that developers who want the fastest incremental builds
+use the Ninja build system; it can be found in your system's package
+manager, usually as the package ninja or ninja-build.
+
+Quick snippet for manually compiling LLVM and clang
+(build dependencies are ninja, cmake and gcc-c++)::
+
+ $ git clone https://github.com/llvm/llvm-project.git
+ $ mkdir -p llvm-project/llvm/build
+ $ cd llvm-project/llvm/build
+ $ cmake .. -G "Ninja" -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+ -DLLVM_ENABLE_PROJECTS="clang" \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DLLVM_BUILD_RUNTIME=OFF
+ $ ninja
+
+It is also possible to point make to the newly compiled 'llc' or
+'clang' command by redefining LLC or CLANG on the make command line::
+
+ make M=samples/bpf LLC=~/git/llvm-project/llvm/build/bin/llc CLANG=~/git/llvm-project/llvm/build/bin/clang
+
+Cross compiling samples
+-----------------------
+In order to cross-compile, say for arm64 targets, export the CROSS_COMPILE and
+ARCH environment variables before calling make. Do this before the clean,
+configuration and header install steps described above. This will direct make
+to build the samples for the cross target::
+
+ export ARCH=arm64
+ export CROSS_COMPILE="aarch64-linux-gnu-"
+
+Headers can also be installed on the RFS of the target board if you need to
+keep them in sync (not strictly necessary; this also creates a local
+"usr/include" directory)::
+
+ make INSTALL_HDR_PATH=~/some_sysroot/usr headers_install
+
+Pointing to LLC and CLANG is not necessary if they are installed on the HOST
+and include the appropriate arm64 arch among their targets (usually several
+arches are supported).
+Build samples::
+
+ make M=samples/bpf
+
+Or build the samples with SYSROOT if some header or library is absent from
+the toolchain, say libelf: provide the path to a file system containing the
+headers and libs (this can be the RFS of the target board)::
+
+ make M=samples/bpf SYSROOT=~/some_sysroot
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000..7048bb359
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef __ASM_GOTO_WORKAROUND_H
+#define __ASM_GOTO_WORKAROUND_H
+
+/*
+ * This will bring in asm_volatile_goto and asm_inline macro definitions
+ * if enabled by compiler and config options.
+ */
+#include <linux/types.h>
+
+#ifdef asm_volatile_goto
+#undef asm_volatile_goto
+#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#endif
+
+/*
+ * asm_inline is defined as asm __inline in "include/linux/compiler_types.h"
+ * if supported by the kernel's CC (i.e. CONFIG_CC_HAS_ASM_INLINE), which is
+ * not supported by CLANG.
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
+#define volatile(x...) volatile("")
+#endif
diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h
new file mode 100644
index 000000000..29c3bb6ad
--- /dev/null
+++ b/samples/bpf/bpf_insn.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* eBPF instruction mini library */
+#ifndef __BPF_INSN_H
+#define __BPF_INSN_H
+
+struct bpf_insn;
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_MOV32_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV64_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_MOV32_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+#define BPF_LD_IMM64(DST, IMM) \
+ BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_DW | BPF_IMM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = (__u32) (IMM) }), \
+ ((struct bpf_insn) { \
+ .code = 0, /* zero is reserved opcode */ \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = ((__u64) (IMM)) >> 32 })
+
+#ifndef BPF_PSEUDO_MAP_FD
+# define BPF_PSEUDO_MAP_FD 1
+#endif
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD) \
+ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
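+
+/*
+ * For example, BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1, 0)
+ * emits an atomic *(u64 *)(r9 + 0) += r1 (see the usage in
+ * cookie_uid_helper_example.c).
+ */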
+
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
+
+#define BPF_JMP_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = CODE, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_EXIT, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
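+/*
+ * Usage sketch (illustrative): the macros above compose into plain
+ * instruction arrays; for example, a minimal program that just
+ * returns 1 (as used in fds_example.c):
+ *
+ *	struct bpf_insn prog[] = {
+ *		BPF_MOV64_IMM(BPF_REG_0, 1),	// r0 = 1
+ *		BPF_EXIT_INSN(),		// return r0
+ *	};
+ */
+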
+#endif
diff --git a/samples/bpf/cookie_uid_helper_example.c b/samples/bpf/cookie_uid_helper_example.c
new file mode 100644
index 000000000..f0df3dda4
--- /dev/null
+++ b/samples/bpf/cookie_uid_helper_example.c
@@ -0,0 +1,332 @@
+/* This test is a demo of using get_socket_uid and get_socket_cookie
+ * helper function to do per socket based network traffic monitoring.
+ * It requires an iptables version higher than 1.6.1 to load a pinned eBPF
+ * program into the xt_bpf match.
+ *
+ * TEST:
+ * ./run_cookie_uid_helper_example.sh -option
+ * option:
+ * -t: do the traffic monitoring test; the program will continuously
+ * print out the network traffic that happens after the program starts. A
+ * sample output is shown below:
+ *
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 132, uid: 0x0, Packet Count: 2, Bytes Count: 286
+ * cookie: 812, uid: 0x3e8, Packet Count: 3, Bytes Count: 1726
+ * cookie: 802, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 877, uid: 0x3e8, Packet Count: 20, Bytes Count: 11058
+ * cookie: 831, uid: 0x3e8, Packet Count: 2, Bytes Count: 104
+ * cookie: 0, uid: 0x0, Packet Count: 6, Bytes Count: 712
+ * cookie: 880, uid: 0xfffe, Packet Count: 1, Bytes Count: 70
+ *
+ * -s: do the getsockopt SO_COOKIE test; the program will set up a pair of
+ * UDP sockets and send packets between them, then read out the traffic data
+ * directly from the eBPF map based on the socket cookie.
+ *
+ * Clean up: if using the shell script, the script will delete the iptables
+ * rule and unpin the bpf program on exit. Otherwise the iptables rule needs
+ * to be deleted by hand; see run_cookie_uid_helper_example.sh for details.
+ */
+
+#define _GNU_SOURCE
+
+#define offsetof(type, member) __builtin_offsetof(type, member)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <limits.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <net/if.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include "bpf_insn.h"
+
+#define PORT 8888
+
+struct stats {
+ uint32_t uid;
+ uint64_t packets;
+ uint64_t bytes;
+};
+
+static int map_fd, prog_fd;
+
+static bool test_finish;
+
+static void maps_create(void)
+{
+ map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(uint32_t),
+ sizeof(struct stats), 100, NULL);
+ if (map_fd < 0)
+ error(1, errno, "map create failed!\n");
+}
+
+static void prog_load(void)
+{
+ static char log_buf[1 << 16];
+
+ struct bpf_insn prog[] = {
+ /*
+ * Save sk_buff for future usage. value stored in R6 to R10 will
+ * not be reset after a bpf helper function call.
+ */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /*
+ * pc1: BPF_FUNC_get_socket_cookie takes one parameter,
+ * R1: sk_buff
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+		/* pc2-4: save &socketCookie to r7 for future usage */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ /*
+ * pc5-8: set up the registers for BPF_FUNC_map_lookup_elem,
+ * it takes two parameters (R1: map_fd, R2: &socket_cookie)
+ */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ /*
+		 * pc9. if r0 != 0x0, go to pc+14, since we have the cookie
+		 * stored already.
+		 * Otherwise do pc10-22 to set up a new data entry.
+ */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 14),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_uid),
+ /*
+		 * Place a struct stats on the R10 stack and sequentially
+		 * place the member values into memory. The packets value
+		 * is set by directly placing the IMM value 1 on the stack.
+ */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0,
+ -32 + (__s16)offsetof(struct stats, uid)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10,
+ -32 + (__s16)offsetof(struct stats, packets), 1),
+ /*
+ * __sk_buff is a special struct used for eBPF program to
+ * directly access some sk_buff field.
+ */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1,
+ -32 + (__s16)offsetof(struct stats, bytes)),
+ /*
+ * add new map entry using BPF_FUNC_map_update_elem, it takes
+ * 4 parameters (R1: map_fd, R2: &socket_cookie, R3: &stats,
+ * R4: flags)
+ */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_update_elem),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+ /*
+		 * pc24-30 update the packet info of an existing data entry;
+		 * this can be done by writing directly through pointers
+		 * instead of using the BPF_FUNC_map_update_elem helper function
+ */
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+ offsetof(struct stats, packets)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+ offsetof(struct stats, bytes)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = sizeof(log_buf),
+ );
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ prog, ARRAY_SIZE(prog), &opts);
+ if (prog_fd < 0)
+ error(1, errno, "failed to load prog\n%s\n", log_buf);
+}
+
+static void prog_attach_iptables(char *file)
+{
+ int ret;
+ char rules[256];
+
+ if (bpf_obj_pin(prog_fd, file))
+ error(1, errno, "bpf_obj_pin");
+ if (strlen(file) > 50) {
+ printf("file path too long: %s\n", file);
+ exit(1);
+ }
+ ret = snprintf(rules, sizeof(rules),
+ "iptables -A OUTPUT -m bpf --object-pinned %s -j ACCEPT",
+ file);
+ if (ret < 0 || ret >= sizeof(rules)) {
+ printf("error constructing iptables command\n");
+ exit(1);
+ }
+ ret = system(rules);
+ if (ret < 0) {
+ printf("iptables rule update failed: %d/n", WEXITSTATUS(ret));
+ exit(1);
+ }
+}
+
+static void print_table(void)
+{
+ struct stats curEntry;
+ uint32_t curN = UINT32_MAX;
+ uint32_t nextN;
+ int res;
+
+ while (bpf_map_get_next_key(map_fd, &curN, &nextN) > -1) {
+ curN = nextN;
+ res = bpf_map_lookup_elem(map_fd, &curN, &curEntry);
+ if (res < 0) {
+ error(1, errno, "fail to get entry value of Key: %u\n",
+ curN);
+ } else {
+ printf("cookie: %u, uid: 0x%x, Packet Count: %lu,"
+ " Bytes Count: %lu\n", curN, curEntry.uid,
+ curEntry.packets, curEntry.bytes);
+ }
+ }
+}
+
+static void udp_client(void)
+{
+ struct sockaddr_in si_other = {0};
+ struct sockaddr_in si_me = {0};
+ struct stats dataEntry;
+ int s_rcv, s_send, i, recv_len;
+ char message = 'a';
+ char buf;
+ uint64_t cookie;
+ int res;
+ socklen_t cookie_len = sizeof(cookie);
+ socklen_t slen = sizeof(si_other);
+
+ s_rcv = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s_rcv < 0)
+ error(1, errno, "rcv socket creat failed!\n");
+ si_other.sin_family = AF_INET;
+ si_other.sin_port = htons(PORT);
+ if (inet_aton("127.0.0.1", &si_other.sin_addr) == 0)
+ error(1, errno, "inet_aton\n");
+ if (bind(s_rcv, (struct sockaddr *)&si_other, sizeof(si_other)) == -1)
+ error(1, errno, "bind\n");
+ s_send = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s_send < 0)
+ error(1, errno, "send socket creat failed!\n");
+ res = getsockopt(s_send, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len);
+ if (res < 0)
+ printf("get cookie failed: %s\n", strerror(errno));
+ res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+ if (res != -1)
+ error(1, errno, "socket stat found while flow not active\n");
+ for (i = 0; i < 10; i++) {
+ res = sendto(s_send, &message, sizeof(message), 0,
+ (struct sockaddr *)&si_other, slen);
+ if (res == -1)
+ error(1, errno, "send\n");
+ if (res != sizeof(message))
+ error(1, 0, "%uB != %luB\n", res, sizeof(message));
+ recv_len = recvfrom(s_rcv, &buf, sizeof(buf), 0,
+ (struct sockaddr *)&si_me, &slen);
+ if (recv_len < 0)
+ error(1, errno, "receive\n");
+ res = memcmp(&(si_other.sin_addr), &(si_me.sin_addr),
+ sizeof(si_me.sin_addr));
+ if (res != 0)
+ error(1, EFAULT, "sender addr error: %d\n", res);
+ printf("Message received: %c\n", buf);
+ res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry);
+ if (res < 0)
+ error(1, errno, "lookup sk stat failed, cookie: %lu\n",
+ cookie);
+ printf("cookie: %lu, uid: 0x%x, Packet Count: %lu,"
+ " Bytes Count: %lu\n\n", cookie, dataEntry.uid,
+ dataEntry.packets, dataEntry.bytes);
+ }
+ close(s_send);
+ close(s_rcv);
+}
+
+static int usage(void)
+{
+ printf("Usage: ./run_cookie_uid_helper_example.sh"
+ " bpfObjName -option\n"
+ " -t traffic monitor test\n"
+ " -s getsockopt cookie test\n");
+ return 1;
+}
+
+static void finish(int ret)
+{
+ test_finish = true;
+}
+
+int main(int argc, char *argv[])
+{
+ int opt;
+ bool cfg_test_traffic = false;
+ bool cfg_test_cookie = false;
+
+ if (argc != 3)
+ return usage();
+ while ((opt = getopt(argc, argv, "ts")) != -1) {
+ switch (opt) {
+ case 't':
+ cfg_test_traffic = true;
+ break;
+ case 's':
+ cfg_test_cookie = true;
+ break;
+
+ default:
+ printf("unknown option %c\n", opt);
+ usage();
+ return -1;
+ }
+ }
+ maps_create();
+ prog_load();
+ prog_attach_iptables(argv[2]);
+ if (cfg_test_traffic) {
+ if (signal(SIGINT, finish) == SIG_ERR)
+ error(1, errno, "register SIGINT handler failed");
+ if (signal(SIGTERM, finish) == SIG_ERR)
+ error(1, errno, "register SIGTERM handler failed");
+ while (!test_finish) {
+ print_table();
+ printf("\n");
+ sleep(1);
+ }
+ } else if (cfg_test_cookie) {
+ udp_client();
+ }
+ close(prog_fd);
+ close(map_fd);
+ return 0;
+}
diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c
new file mode 100644
index 000000000..5aefd19cd
--- /dev/null
+++ b/samples/bpf/cpustat_kern.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/*
+ * The CPU number, cstate number and pstate number are based
+ * on 96boards Hikey with octa CA53 CPUs.
+ *
+ * Every CPU has three idle states for cstate:
+ *   WFI, CPU_OFF, CLUSTER_OFF
+ *
+ * Every CPU has 5 operating points:
+ *   208MHz, 432MHz, 729MHz, 960MHz, 1200MHz
+ *
+ * This code is based on these assumptions; other platforms
+ * need to adjust these definitions.
+ */
+#define MAX_CPU 8
+#define MAX_PSTATE_ENTRIES 5
+#define MAX_CSTATE_ENTRIES 3
+
+static int cpu_opps[] = { 208000, 432000, 729000, 960000, 1200000 };
+
+/*
+ * The my_map structure is used to record the cstate and pstate index and
+ * timestamp (Idx, Ts); when a new event comes in we need to update the
+ * combination with the new state index and timestamp (Idx`, Ts`).
+ *
+ * Based on (Idx, Ts) and (Idx`, Ts`) we can calculate the time
+ * interval for the previous state: Duration(Idx) = Ts` - Ts.
+ *
+ * Every CPU has one array as below for recording the state index and
+ * timestamp, with cstate and pstate recorded separately:
+ *
+ * +--------------------------+
+ * | cstate timestamp |
+ * +--------------------------+
+ * | cstate index |
+ * +--------------------------+
+ * | pstate timestamp |
+ * +--------------------------+
+ * | pstate index |
+ * +--------------------------+
+ */
+#define MAP_OFF_CSTATE_TIME 0
+#define MAP_OFF_CSTATE_IDX 1
+#define MAP_OFF_PSTATE_TIME 2
+#define MAP_OFF_PSTATE_IDX 3
+#define MAP_OFF_NUM 4
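+
+/*
+ * For example (matching the lookups in the programs below), the cstate
+ * timestamp slot of CPU 2 lives at key 2 * MAP_OFF_NUM +
+ * MAP_OFF_CSTATE_TIME = 8.
+ */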
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAP_OFF_NUM);
+} my_map SEC(".maps");
+
+/* cstate_duration records duration time for every idle state per CPU */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAX_CSTATE_ENTRIES);
+} cstate_duration SEC(".maps");
+
+/* pstate_duration records duration time for every operating point per CPU */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAX_PSTATE_ENTRIES);
+} pstate_duration SEC(".maps");
+
+/*
+ * The trace events for cpu_idle and cpu_frequency are taken from:
+ * /sys/kernel/debug/tracing/events/power/cpu_idle/format
+ * /sys/kernel/debug/tracing/events/power/cpu_frequency/format
+ *
+ * These two events have same format, so define one common structure.
+ */
+struct cpu_args {
+ u64 pad;
+ u32 state;
+ u32 cpu_id;
+};
+
+/* calculate pstate index, returns MAX_PSTATE_ENTRIES for failure */
+static u32 find_cpu_pstate_idx(u32 frequency)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(cpu_opps) / sizeof(u32); i++) {
+ if (frequency == cpu_opps[i])
+ return i;
+ }
+
+ return i;
+}
+
+SEC("tracepoint/power/cpu_idle")
+int bpf_prog1(struct cpu_args *ctx)
+{
+ u64 *cts, *pts, *cstate, *pstate, prev_state, cur_ts, delta;
+ u32 key, cpu, pstate_idx;
+ u64 *val;
+
+	if (ctx->cpu_id >= MAX_CPU)
+ return 0;
+
+ cpu = ctx->cpu_id;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_TIME;
+ cts = bpf_map_lookup_elem(&my_map, &key);
+ if (!cts)
+ return 0;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
+ cstate = bpf_map_lookup_elem(&my_map, &key);
+ if (!cstate)
+ return 0;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
+ pts = bpf_map_lookup_elem(&my_map, &key);
+ if (!pts)
+ return 0;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
+ pstate = bpf_map_lookup_elem(&my_map, &key);
+ if (!pstate)
+ return 0;
+
+ prev_state = *cstate;
+ *cstate = ctx->state;
+
+ if (!*cts) {
+ *cts = bpf_ktime_get_ns();
+ return 0;
+ }
+
+ cur_ts = bpf_ktime_get_ns();
+ delta = cur_ts - *cts;
+ *cts = cur_ts;
+
+ /*
+	 * When the state doesn't equal (u32)-1, the cpu will enter
+	 * an idle state; in this case we need to record the interval
+	 * for the pstate.
+ *
+ * OPP2
+ * +---------------------+
+ * OPP1 | |
+ * ---------+ |
+ * | Idle state
+ * +---------------
+ *
+ * |<- pstate duration ->|
+ * ^ ^
+ * pts cur_ts
+ */
+ if (ctx->state != (u32)-1) {
+
+ /* record pstate after have first cpu_frequency event */
+ if (!*pts)
+ return 0;
+
+ delta = cur_ts - *pts;
+
+ pstate_idx = find_cpu_pstate_idx(*pstate);
+ if (pstate_idx >= MAX_PSTATE_ENTRIES)
+ return 0;
+
+ key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
+ val = bpf_map_lookup_elem(&pstate_duration, &key);
+ if (val)
+ __sync_fetch_and_add((long *)val, delta);
+
+ /*
+	 * When the state equals (u32)-1, the cpu has just exited one
+	 * specific idle state; in this case we need to record the
+	 * interval for the cstate.
+ *
+ * OPP2
+ * -----------+
+ * | OPP1
+ * | +-----------
+ * | Idle state |
+ * +---------------------+
+ *
+ * |<- cstate duration ->|
+ * ^ ^
+ * cts cur_ts
+ */
+ } else {
+
+ key = cpu * MAX_CSTATE_ENTRIES + prev_state;
+ val = bpf_map_lookup_elem(&cstate_duration, &key);
+ if (val)
+ __sync_fetch_and_add((long *)val, delta);
+ }
+
+ /* Update timestamp for pstate as new start time */
+ if (*pts)
+ *pts = cur_ts;
+
+ return 0;
+}
+
+SEC("tracepoint/power/cpu_frequency")
+int bpf_prog2(struct cpu_args *ctx)
+{
+ u64 *pts, *cstate, *pstate, prev_state, cur_ts, delta;
+ u32 key, cpu, pstate_idx;
+ u64 *val;
+
+ cpu = ctx->cpu_id;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME;
+ pts = bpf_map_lookup_elem(&my_map, &key);
+ if (!pts)
+ return 0;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX;
+ pstate = bpf_map_lookup_elem(&my_map, &key);
+ if (!pstate)
+ return 0;
+
+ key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX;
+ cstate = bpf_map_lookup_elem(&my_map, &key);
+ if (!cstate)
+ return 0;
+
+ prev_state = *pstate;
+ *pstate = ctx->state;
+
+ if (!*pts) {
+ *pts = bpf_ktime_get_ns();
+ return 0;
+ }
+
+ cur_ts = bpf_ktime_get_ns();
+ delta = cur_ts - *pts;
+ *pts = cur_ts;
+
+ /* When CPU is in idle, bail out to skip pstate statistics */
+ if (*cstate != (u32)(-1))
+ return 0;
+
+ /*
+	 * The cpu changes to a different OPP (in the diagram below the
+	 * frequency changes from OPP3 to OPP1); we need to record the
+	 * interval for the previous frequency OPP3 and update the
+	 * timestamp as the start time for the new frequency OPP1.
+ *
+ * OPP3
+ * +---------------------+
+ * OPP2 | |
+ * ---------+ |
+ * | OPP1
+ * +---------------
+ *
+ * |<- pstate duration ->|
+ * ^ ^
+ * pts cur_ts
+ */
+ pstate_idx = find_cpu_pstate_idx(*pstate);
+ if (pstate_idx >= MAX_PSTATE_ENTRIES)
+ return 0;
+
+ key = cpu * MAX_PSTATE_ENTRIES + pstate_idx;
+ val = bpf_map_lookup_elem(&pstate_duration, &key);
+ if (val)
+ __sync_fetch_and_add((long *)val, delta);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/cpustat_user.c b/samples/bpf/cpustat_user.c
new file mode 100644
index 000000000..ab90bb08a
--- /dev/null
+++ b/samples/bpf/cpustat_user.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sched.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <locale.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static int cstate_map_fd, pstate_map_fd;
+
+#define MAX_CPU 8
+#define MAX_PSTATE_ENTRIES 5
+#define MAX_CSTATE_ENTRIES 3
+#define MAX_STARS 40
+
+#define CPUFREQ_MAX_SYSFS_PATH "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq"
+#define CPUFREQ_LOWEST_FREQ "208000"
+#define CPUFREQ_HIGHEST_FREQ "12000000"
+
+struct cpu_stat_data {
+ unsigned long cstate[MAX_CSTATE_ENTRIES];
+ unsigned long pstate[MAX_PSTATE_ENTRIES];
+};
+
+static struct cpu_stat_data stat_data[MAX_CPU];
+
+static void cpu_stat_print(void)
+{
+ int i, j;
+ char state_str[sizeof("cstate-9")];
+ struct cpu_stat_data *data;
+
+ /* Clear screen */
+ printf("\033[2J");
+
+ /* Header */
+ printf("\nCPU states statistics:\n");
+ printf("%-10s ", "state(ms)");
+
+ for (i = 0; i < MAX_CSTATE_ENTRIES; i++) {
+ sprintf(state_str, "cstate-%d", i);
+ printf("%-11s ", state_str);
+ }
+
+ for (i = 0; i < MAX_PSTATE_ENTRIES; i++) {
+ sprintf(state_str, "pstate-%d", i);
+ printf("%-11s ", state_str);
+ }
+
+ printf("\n");
+
+ for (j = 0; j < MAX_CPU; j++) {
+ data = &stat_data[j];
+
+ printf("CPU-%-6d ", j);
+ for (i = 0; i < MAX_CSTATE_ENTRIES; i++)
+ printf("%-11ld ", data->cstate[i] / 1000000);
+
+ for (i = 0; i < MAX_PSTATE_ENTRIES; i++)
+ printf("%-11ld ", data->pstate[i] / 1000000);
+
+ printf("\n");
+ }
+}
+
+static void cpu_stat_update(int cstate_fd, int pstate_fd)
+{
+ unsigned long key, value;
+ int c, i;
+
+ for (c = 0; c < MAX_CPU; c++) {
+ for (i = 0; i < MAX_CSTATE_ENTRIES; i++) {
+ key = c * MAX_CSTATE_ENTRIES + i;
+ bpf_map_lookup_elem(cstate_fd, &key, &value);
+ stat_data[c].cstate[i] = value;
+ }
+
+ for (i = 0; i < MAX_PSTATE_ENTRIES; i++) {
+ key = c * MAX_PSTATE_ENTRIES + i;
+ bpf_map_lookup_elem(pstate_fd, &key, &value);
+ stat_data[c].pstate[i] = value;
+ }
+ }
+}
+
+/*
+ * This function is copied from 'idlestat' tool function
+ * idlestat_wake_all() in idlestate.c.
+ *
+ * It sets the affinity of the currently running task to each cpu in turn,
+ * which wakes up that specific CPU to handle scheduling; as a result, all
+ * cpus are woken up once and produce the ftrace event 'trace_cpu_idle'.
+ */
+static int cpu_stat_inject_cpu_idle_event(void)
+{
+ int rcpu, i, ret;
+ cpu_set_t cpumask;
+ cpu_set_t original_cpumask;
+
+ ret = sysconf(_SC_NPROCESSORS_CONF);
+ if (ret < 0)
+ return -1;
+
+ rcpu = sched_getcpu();
+ if (rcpu < 0)
+ return -1;
+
+ /* Keep track of the CPUs we will run on */
+ sched_getaffinity(0, sizeof(original_cpumask), &original_cpumask);
+
+ for (i = 0; i < ret; i++) {
+
+ /* Pointless to wake up ourself */
+ if (i == rcpu)
+ continue;
+
+ /* Pointless to wake CPUs we will not run on */
+ if (!CPU_ISSET(i, &original_cpumask))
+ continue;
+
+ CPU_ZERO(&cpumask);
+ CPU_SET(i, &cpumask);
+
+ sched_setaffinity(0, sizeof(cpumask), &cpumask);
+ }
+
+ /* Enable all the CPUs of the original mask */
+ sched_setaffinity(0, sizeof(original_cpumask), &original_cpumask);
+ return 0;
+}
+
+/*
+ * It's possible that there is no frequency change for a long time, so no
+ * ftrace event 'trace_cpu_frequency' is seen for a long period; this
+ * introduces a big deviation in the pstate statistics.
+ *
+ * To solve this issue, the code below forces 'scaling_max_freq' to 208MHz
+ * to trigger the ftrace event 'trace_cpu_frequency', and then restores it
+ * to the maximum frequency value of 1.2GHz.
+ */
+static int cpu_stat_inject_cpu_frequency_event(void)
+{
+ int len, fd;
+
+ fd = open(CPUFREQ_MAX_SYSFS_PATH, O_WRONLY);
+ if (fd < 0) {
+ printf("failed to open scaling_max_freq, errno=%d\n", errno);
+ return fd;
+ }
+
+ len = write(fd, CPUFREQ_LOWEST_FREQ, strlen(CPUFREQ_LOWEST_FREQ));
+ if (len < 0) {
+ printf("failed to open scaling_max_freq, errno=%d\n", errno);
+ goto err;
+ }
+
+ len = write(fd, CPUFREQ_HIGHEST_FREQ, strlen(CPUFREQ_HIGHEST_FREQ));
+ if (len < 0) {
+ printf("failed to open scaling_max_freq, errno=%d\n", errno);
+ goto err;
+ }
+
+err:
+ close(fd);
+ return len;
+}
+
+static void int_exit(int sig)
+{
+ cpu_stat_inject_cpu_idle_event();
+ cpu_stat_inject_cpu_frequency_event();
+ cpu_stat_update(cstate_map_fd, pstate_map_fd);
+ cpu_stat_print();
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ cstate_map_fd = bpf_object__find_map_fd_by_name(obj, "cstate_duration");
+ pstate_map_fd = bpf_object__find_map_fd_by_name(obj, "pstate_duration");
+ if (cstate_map_fd < 0 || pstate_map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ ret = cpu_stat_inject_cpu_idle_event();
+ if (ret < 0)
+ return 1;
+
+ ret = cpu_stat_inject_cpu_frequency_event();
+ if (ret < 0)
+ return 1;
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ while (1) {
+ cpu_stat_update(cstate_map_fd, pstate_map_fd);
+ cpu_stat_print();
+ sleep(5);
+ }
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/do_hbm_test.sh b/samples/bpf/do_hbm_test.sh
new file mode 100755
index 000000000..38e459935
--- /dev/null
+++ b/samples/bpf/do_hbm_test.sh
@@ -0,0 +1,438 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2019 Facebook
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+
+Usage() {
+ echo "Script for testing HBM (Host Bandwidth Manager) framework."
+ echo "It creates a cgroup to use for testing and load a BPF program to limit"
+ echo "egress or ingress bandwidth. It then uses iperf3 or netperf to create"
+ echo "loads. The output is the goodput in Mbps (unless -D was used)."
+ echo ""
+ echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>]"
+ echo " [-D] [-d=<delay>|--delay=<delay>] [--debug] [-E] [--edt]"
+ echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id >]"
+ echo " [-l] [-N] [--no_cn] [-p=<port>|--port=<port>] [-P]"
+ echo " [-q=<qdisc>] [-R] [-s=<server>|--server=<server]"
+ echo " [-S|--stats] -t=<time>|--time=<time>] [-w] [cubic|dctcp]"
+ echo " Where:"
+ echo " out egress (default)"
+ echo " -b or --bpf BPF program filename to load and attach."
+ echo " Default is hbm_out_kern.o for egress,"
+ echo " -c or -cc TCP congestion control (cubic or dctcp)"
+ echo " --debug print BPF trace buffer"
+ echo " -d or --delay add a delay in ms using netem"
+ echo " -D In addition to the goodput in Mbps, it also outputs"
+ echo " other detailed information. This information is"
+ echo " test dependent (i.e. iperf3 or netperf)."
+ echo " -E enable ECN (not required for dctcp)"
+ echo " --edt use fq's Earliest Departure Time (requires fq)"
+ echo " -f or --flows number of concurrent flows (default=1)"
+ echo " -i or --id cgroup id (an integer, default is 1)"
+ echo " -N use netperf instead of iperf3"
+ echo " --no_cn Do not return CN notifications"
+ echo " -l do not limit flows using loopback"
+ echo " -h Help"
+ echo " -p or --port iperf3 port (default is 5201)"
+ echo " -P use an iperf3 instance for each flow"
+ echo " -q use the specified qdisc"
+ echo " -r or --rate rate in Mbps (default 1s 1Gbps)"
+ echo " -R Use TCP_RR for netperf. 1st flow has req"
+ echo " size of 10KB, rest of 1MB. Reply in all"
+ echo " cases is 1 byte."
+ echo " More detailed output for each flow can be found"
+ echo " in the files netperf.<cg>.<flow>, where <cg> is the"
+ echo " cgroup id as specified with the -i flag, and <flow>"
+ echo " is the flow id starting at 1 and increasing by 1 for"
+ echo " flow (as specified by -f)."
+ echo " -s or --server hostname of netperf server. Used to create netperf"
+ echo " test traffic between to hosts (default is within host)"
+ echo " netserver must be running on the host."
+ echo " -S or --stats whether to update hbm stats (default is yes)."
+ echo " -t or --time duration of iperf3 in seconds (default=5)"
+ echo " -w Work conserving flag. cgroup can increase its"
+ echo " bandwidth beyond the rate limit specified"
+ echo " while there is available bandwidth. Current"
+ echo " implementation assumes there is only one NIC"
+ echo " (eth0), but can be extended to support multiple"
+ echo " NICs."
+ echo " cubic or dctcp specify which TCP CC to use"
+ echo " "
+ exit
+}
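+
+# Example invocation (illustrative): limit egress of cgroup 1 to 500 Mbps
+# for 10 seconds, generating load with iperf3 using 2 flows:
+#	./do_hbm_test.sh out -r=500 -t=10 -f=2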
+
+#set -x
+
+debug_flag=0
+args="$@"
+name="$0"
+netem=0
+cc=x
+dir="-o"
+dir_name="out"
+dur=5
+flows=1
+id=1
+prog=""
+port=5201
+rate=1000
+multi_iperf=0
+flow_cnt=1
+use_netperf=0
+rr=0
+ecn=0
+details=0
+server=""
+qdisc=""
+flags=""
+do_stats=0
+
+BPFFS=/sys/fs/bpf
+function config_bpffs () {
+ if mount | grep $BPFFS > /dev/null; then
+ echo "bpffs already mounted"
+ else
+ echo "bpffs not mounted. Mounting..."
+ mount -t bpf none $BPFFS
+ fi
+}
+
+function start_hbm () {
+ rm -f hbm.out
+ echo "./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog" > hbm.out
+ echo " " >> hbm.out
+ ./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog >> hbm.out 2>&1 &
+ echo $!
+}
+
+processArgs () {
+ for i in $args ; do
+ case $i in
+	# Support for upcoming ingress rate limiting
+ #in) # support for upcoming ingress rate limiting
+ # dir="-i"
+ # dir_name="in"
+ # ;;
+ out)
+ dir="-o"
+ dir_name="out"
+ ;;
+ -b=*|--bpf=*)
+ prog="${i#*=}"
+ ;;
+ -c=*|--cc=*)
+ cc="${i#*=}"
+ ;;
+ --no_cn)
+ flags="$flags --no_cn"
+ ;;
+ --debug)
+ flags="$flags -d"
+ debug_flag=1
+ ;;
+ -d=*|--delay=*)
+ netem="${i#*=}"
+ ;;
+ -D)
+ details=1
+ ;;
+ -E)
+ ecn=1
+ ;;
+ --edt)
+ flags="$flags --edt"
+ qdisc="fq"
+ ;;
+ -f=*|--flows=*)
+ flows="${i#*=}"
+ ;;
+ -i=*|--id=*)
+ id="${i#*=}"
+ ;;
+ -l)
+ flags="$flags -l"
+ ;;
+ -N)
+ use_netperf=1
+ ;;
+ -p=*|--port=*)
+ port="${i#*=}"
+ ;;
+ -P)
+ multi_iperf=1
+ ;;
+ -q=*)
+ qdisc="${i#*=}"
+ ;;
+ -r=*|--rate=*)
+ rate="${i#*=}"
+ ;;
+ -R)
+ rr=1
+ ;;
+ -s=*|--server=*)
+ server="${i#*=}"
+ ;;
+ -S|--stats)
+ flags="$flags -s"
+ do_stats=1
+ ;;
+ -t=*|--time=*)
+ dur="${i#*=}"
+ ;;
+ -w)
+ flags="$flags -w"
+ ;;
+ cubic)
+ cc=cubic
+ ;;
+ dctcp)
+ cc=dctcp
+ ;;
+ *)
+ echo "Unknown arg:$i"
+ Usage
+ ;;
+ esac
+ done
+}
+
+processArgs
+config_bpffs
+
+if [ $debug_flag -eq 1 ] ; then
+ rm -f hbm_out.log
+fi
+
+hbm_pid=$(start_hbm)
+usleep 100000
+
+host=`hostname`
+cg_base_dir=/sys/fs/cgroup/unified
+cg_dir="$cg_base_dir/cgroup-test-work-dir/hbm$id"
+
+echo $$ >> $cg_dir/cgroup.procs
+
+ulimit -l unlimited
+
+rm -f ss.out
+rm -f hbm.[0-9]*.$dir_name
+if [ $ecn -ne 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_ecn=1
+fi
+
+if [ $use_netperf -eq 0 ] ; then
+ cur_cc=`sysctl -n net.ipv4.tcp_congestion_control`
+ if [ "$cc" != "x" ] ; then
+ sysctl -w -q -n net.ipv4.tcp_congestion_control=$cc
+ fi
+fi
+
+if [ "$netem" -ne "0" ] ; then
+ if [ "$qdisc" != "" ] ; then
+ echo "WARNING: Ignoring -q options because -d option used"
+ fi
+ tc qdisc del dev lo root > /dev/null 2>&1
+ tc qdisc add dev lo root netem delay $netem\ms > /dev/null 2>&1
+elif [ "$qdisc" != "" ] ; then
+ tc qdisc del dev eth0 root > /dev/null 2>&1
+ tc qdisc add dev eth0 root $qdisc > /dev/null 2>&1
+fi
+
+n=0
+m=$[$dur * 5]
+hn="::1"
+if [ $use_netperf -ne 0 ] ; then
+ if [ "$server" != "" ] ; then
+ hn=$server
+ fi
+fi
+
+( ping6 -i 0.2 -c $m $hn > ping.out 2>&1 ) &
+
+if [ $use_netperf -ne 0 ] ; then
+ begNetserverPid=`ps ax | grep netserver | grep --invert-match "grep" | \
+ awk '{ print $1 }'`
+ if [ "$begNetserverPid" == "" ] ; then
+ if [ "$server" == "" ] ; then
+ ( ./netserver > /dev/null 2>&1) &
+ usleep 100000
+ fi
+ fi
+ flow_cnt=1
+ if [ "$server" == "" ] ; then
+ np_server=$host
+ else
+ np_server=$server
+ fi
+ if [ "$cc" == "x" ] ; then
+ np_cc=""
+ else
+ np_cc="-K $cc,$cc"
+ fi
+ replySize=1
+ while [ $flow_cnt -le $flows ] ; do
+ if [ $rr -ne 0 ] ; then
+ reqSize=1M
+ if [ $flow_cnt -eq 1 ] ; then
+ reqSize=10K
+ fi
+ if [ "$dir" == "-i" ] ; then
+ replySize=$reqSize
+ reqSize=1
+ fi
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_RR -- -r $reqSize,$replySize $np_cc -k P50_lATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,LOCAL_RECV_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ else
+ if [ "$dir" == "-i" ] ; then
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_RR -- -r 1,10M $np_cc -k P50_LATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,REMOTE_TRANSPORT_RETRANS,REMOTE_SEND_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ else
+ ( ./netperf -H $np_server -l $dur -f m -j -t TCP_STREAM -- $np_cc -k P50_lATENCY,P90_LATENCY,LOCAL_TRANSPORT_RETRANS,LOCAL_SEND_THROUGHPUT,REQUEST_SIZE,RESPONSE_SIZE > netperf.$id.$flow_cnt ) &
+ fi
+ fi
+ flow_cnt=$[flow_cnt+1]
+ done
+
+# sleep for duration of test (plus some buffer)
+ n=$[dur+2]
+ sleep $n
+
+# force graceful termination of netperf
+ pids=`pgrep netperf`
+ for p in $pids ; do
+ kill -SIGALRM $p
+ done
+
+ flow_cnt=1
+ rate=0
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ fi
+ while [ $flow_cnt -le $flows ] ; do
+ if [ "$dir" == "-i" ] ; then
+ r=`cat netperf.$id.$flow_cnt | grep -o "REMOTE_SEND_THROUGHPUT=[0-9]*" | grep -o "[0-9]*"`
+ else
+ r=`cat netperf.$id.$flow_cnt | grep -o "LOCAL_SEND_THROUGHPUT=[0-9]*" | grep -o "[0-9]*"`
+ fi
+ echo "rate for flow $flow_cnt: $r"
+ rate=$[rate+r]
+ if [ $details -ne 0 ] ; then
+ echo "-----"
+ echo "Details for cgroup $id, flow $flow_cnt"
+ cat netperf.$id.$flow_cnt
+ fi
+ flow_cnt=$[flow_cnt+1]
+ done
+ if [ $details -ne 0 ] ; then
+ echo ""
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+elif [ $multi_iperf -eq 0 ] ; then
+ (iperf3 -s -p $port -1 > /dev/null 2>&1) &
+ usleep 100000
+ iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
+ rates=`grep receiver iperf.$id | grep -o "[0-9.]* Mbits" | grep -o "^[0-9]*"`
+ rate=`echo $rates | grep -o "[0-9]*$"`
+
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+else
+ flow_cnt=1
+ while [ $flow_cnt -le $flows ] ; do
+ (iperf3 -s -p $port -1 > /dev/null 2>&1) &
+ ( iperf3 -c $host -p $port -i 0 -P 1 -f m -t $dur | grep receiver | grep -o "[0-9.]* Mbits" | grep -o "^[0-9]*" | grep -o "[0-9]*$" > iperf3.$id.$flow_cnt ) &
+ port=$[port+1]
+ flow_cnt=$[flow_cnt+1]
+ done
+ n=$[dur+1]
+ sleep $n
+ flow_cnt=1
+ rate=0
+ if [ $details -ne 0 ] ; then
+ echo ""
+ echo "Details for HBM in cgroup $id"
+ if [ $do_stats -eq 1 ] ; then
+ if [ -e hbm.$id.$dir_name ] ; then
+ cat hbm.$id.$dir_name
+ fi
+ fi
+ fi
+
+ while [ $flow_cnt -le $flows ] ; do
+ r=`cat iperf3.$id.$flow_cnt`
+# echo "rate for flow $flow_cnt: $r"
+ if [ $details -ne 0 ] ; then
+ echo "Rate for cgroup $id, flow $flow_cnt LOCAL_SEND_THROUGHPUT=$r"
+ fi
+ rate=$[rate+r]
+ flow_cnt=$[flow_cnt+1]
+ done
+ if [ $details -ne 0 ] ; then
+ delay=`grep "avg" ping.out | grep -o "= [0-9.]*/[0-9.]*" | grep -o "[0-9.]*$"`
+ echo "PING AVG DELAY:$delay"
+ echo "AGGREGATE_GOODPUT:$rate"
+ else
+ echo $rate
+ fi
+fi
+
+if [ $use_netperf -eq 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_congestion_control=$cur_cc
+fi
+if [ $ecn -ne 0 ] ; then
+ sysctl -w -q -n net.ipv4.tcp_ecn=0
+fi
+if [ "$netem" -ne "0" ] ; then
+ tc qdisc del dev lo root > /dev/null 2>&1
+fi
+if [ "$qdisc" != "" ] ; then
+ tc qdisc del dev eth0 root > /dev/null 2>&1
+fi
+sleep 2
+
+hbmPid=`ps ax | grep "hbm " | grep --invert-match "grep" | awk '{ print $1 }'`
+if [ "$hbmPid" == "$hbm_pid" ] ; then
+ kill $hbm_pid
+fi
+
+sleep 1
+
+# Detach any pinned BPF programs that may have lingered
+rm -rf $BPFFS/hbm*
+
+if [ $use_netperf -ne 0 ] ; then
+ if [ "$server" == "" ] ; then
+ if [ "$begNetserverPid" == "" ] ; then
+ netserverPid=`ps ax | grep netserver | grep --invert-match "grep" | awk '{ print $1 }'`
+ if [ "$netserverPid" != "" ] ; then
+ kill $netserverPid
+ fi
+ fi
+ fi
+fi
+exit
diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
new file mode 100644
index 000000000..88a26f3ce
--- /dev/null
+++ b/samples/bpf/fds_example.c
@@ -0,0 +1,195 @@
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf.h>
+
+#include <bpf/libbpf.h>
+#include "bpf_insn.h"
+#include "sock_example.h"
+#include "bpf_util.h"
+
+#define BPF_F_PIN (1 << 0)
+#define BPF_F_GET (1 << 1)
+#define BPF_F_PIN_GET (BPF_F_PIN | BPF_F_GET)
+
+#define BPF_F_KEY (1 << 2)
+#define BPF_F_VAL (1 << 3)
+#define BPF_F_KEY_VAL (BPF_F_KEY | BPF_F_VAL)
+
+#define BPF_M_UNSPEC 0
+#define BPF_M_MAP 1
+#define BPF_M_PROG 2
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static void usage(void)
+{
+ printf("Usage: fds_example [...]\n");
+ printf(" -F <file> File to pin/get object\n");
+ printf(" -P |- pin object\n");
+ printf(" -G `- get object\n");
+ printf(" -m eBPF map mode\n");
+ printf(" -k <key> |- map key\n");
+ printf(" -v <value> `- map value\n");
+ printf(" -p eBPF prog mode\n");
+ printf(" -o <object> `- object file\n");
+ printf(" -h Display this help.\n");
+}
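+
+/*
+ * Example invocations (illustrative; the pin path is arbitrary):
+ *
+ *	fds_example -F /sys/fs/bpf/my_map -P -m -k 1 -v 42   (create + pin map, set 1 -> 42)
+ *	fds_example -F /sys/fs/bpf/my_map -G -m -k 1         (get pinned map, look up key 1)
+ */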
+
+static int bpf_prog_create(const char *object)
+{
+ static struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = ARRAY_SIZE(insns);
+ struct bpf_object *obj;
+ int err;
+
+ if (object) {
+ obj = bpf_object__open_file(object, NULL);
+ assert(!libbpf_get_error(obj));
+ err = bpf_object__load(obj);
+ assert(!err);
+ return bpf_program__fd(bpf_object__next_program(obj, NULL));
+ } else {
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = bpf_log_buf,
+ .log_size = BPF_LOG_BUF_SIZE,
+ );
+
+ return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ insns, insns_cnt, &opts);
+ }
+}
+
+static int bpf_do_map(const char *file, uint32_t flags, uint32_t key,
+ uint32_t value)
+{
+ int fd, ret;
+
+ if (flags & BPF_F_PIN) {
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(uint32_t),
+ sizeof(uint32_t), 1024, NULL);
+ printf("bpf: map fd:%d (%s)\n", fd, strerror(errno));
+ assert(fd > 0);
+
+ ret = bpf_obj_pin(fd, file);
+ printf("bpf: pin ret:(%d,%s)\n", ret, strerror(errno));
+ assert(ret == 0);
+ } else {
+ fd = bpf_obj_get(file);
+ printf("bpf: get fd:%d (%s)\n", fd, strerror(errno));
+ assert(fd > 0);
+ }
+
+ if ((flags & BPF_F_KEY_VAL) == BPF_F_KEY_VAL) {
+ ret = bpf_map_update_elem(fd, &key, &value, 0);
+ printf("bpf: fd:%d u->(%u:%u) ret:(%d,%s)\n", fd, key, value,
+ ret, strerror(errno));
+ assert(ret == 0);
+ } else if (flags & BPF_F_KEY) {
+ ret = bpf_map_lookup_elem(fd, &key, &value);
+ printf("bpf: fd:%d l->(%u):%u ret:(%d,%s)\n", fd, key, value,
+ ret, strerror(errno));
+ assert(ret == 0);
+ }
+
+ return 0;
+}
+
+static int bpf_do_prog(const char *file, uint32_t flags, const char *object)
+{
+ int fd, sock, ret;
+
+ if (flags & BPF_F_PIN) {
+ fd = bpf_prog_create(object);
+ printf("bpf: prog fd:%d (%s)\n", fd, strerror(errno));
+ assert(fd > 0);
+
+ ret = bpf_obj_pin(fd, file);
+ printf("bpf: pin ret:(%d,%s)\n", ret, strerror(errno));
+ assert(ret == 0);
+ } else {
+ fd = bpf_obj_get(file);
+ printf("bpf: get fd:%d (%s)\n", fd, strerror(errno));
+ assert(fd > 0);
+ }
+
+ sock = open_raw_sock("lo");
+ assert(sock > 0);
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &fd, sizeof(fd));
+ printf("bpf: sock:%d <- fd:%d attached ret:(%d,%s)\n", sock, fd,
+ ret, strerror(errno));
+ assert(ret == 0);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ const char *file = NULL, *object = NULL;
+ uint32_t key = 0, value = 0, flags = 0;
+ int opt, mode = BPF_M_UNSPEC;
+
+ while ((opt = getopt(argc, argv, "F:PGmk:v:po:")) != -1) {
+ switch (opt) {
+ /* General args */
+ case 'F':
+ file = optarg;
+ break;
+ case 'P':
+ flags |= BPF_F_PIN;
+ break;
+ case 'G':
+ flags |= BPF_F_GET;
+ break;
+ /* Map-related args */
+ case 'm':
+ mode = BPF_M_MAP;
+ break;
+ case 'k':
+ key = strtoul(optarg, NULL, 0);
+ flags |= BPF_F_KEY;
+ break;
+ case 'v':
+ value = strtoul(optarg, NULL, 0);
+ flags |= BPF_F_VAL;
+ break;
+ /* Prog-related args */
+ case 'p':
+ mode = BPF_M_PROG;
+ break;
+ case 'o':
+ object = optarg;
+ break;
+ default:
+ goto out;
+ }
+ }
+
+ if (!(flags & BPF_F_PIN_GET) || !file)
+ goto out;
+
+ switch (mode) {
+ case BPF_M_MAP:
+ return bpf_do_map(file, flags, key, value);
+ case BPF_M_PROG:
+ return bpf_do_prog(file, flags, object);
+ }
+out:
+ usage();
+ return -1;
+}
diff --git a/samples/bpf/hash_func01.h b/samples/bpf/hash_func01.h
new file mode 100644
index 000000000..38255812e
--- /dev/null
+++ b/samples/bpf/hash_func01.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: LGPL-2.1
+ *
+ * Based on Paul Hsieh's (LGPL 2.1) hash function
+ * From: http://www.azillionmonkeys.com/qed/hash.html
+ */
+
+#define get16bits(d) (*((const __u16 *) (d)))
+
+static __always_inline
+__u32 SuperFastHash (const char *data, int len, __u32 initval) {
+ __u32 hash = initval;
+ __u32 tmp;
+ int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+#pragma clang loop unroll(full)
+ for (;len > 0; len--) {
+ hash += get16bits (data);
+ tmp = (get16bits (data+2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (__u16);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= ((signed char)data[sizeof (__u16)]) << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += (signed char)*data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
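+
+/*
+ * Illustrative use, e.g. hashing a 4-byte key with initval 0:
+ *	__u32 h = SuperFastHash((const char *)&key, sizeof(key), 0);
+ */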
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
new file mode 100644
index 000000000..7f89700a1
--- /dev/null
+++ b/samples/bpf/hbm.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Example program for Host Bandwidth Management
+ *
+ * This program loads a cgroup skb BPF program to enforce cgroup output
+ * (egress) or input (ingress) bandwidth limits.
+ *
+ * USAGE: hbm [-d] [-l] [-n <id>] [-r <rate>] [-s] [-t <secs>] [-w] [-h] [prog]
+ * Where:
+ * -d Print BPF trace debug buffer
+ * -l Also limit flows doing loopback
+ * -n <#> To create cgroup \"/hbm#\" and attach prog
+ * Default is /hbm1
+ * --no_cn Do not return cn notifications
+ * -r <rate> Rate limit in Mbps
+ * -s Get HBM stats (marked, dropped, etc.)
+ * -t <time> Exit after specified seconds (default is 0)
+ * -w Work conserving flag. cgroup can increase its bandwidth
+ * beyond the rate limit specified while there is available
+ *		bandwidth. Current implementation assumes there is only one
+ *		NIC (eth0), but can be extended to support multiple NICs.
+ *		Currently only supported for egress.
+ * -h Print this info
+ * prog BPF program file name. Name defaults to hbm_out_kern.o
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/unistd.h>
+#include <linux/compiler.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <getopt.h>
+
+#include "cgroup_helpers.h"
+#include "hbm.h"
+#include "bpf_util.h"
+#include <bpf/libbpf.h>
+
+bool outFlag = true;
+int minRate = 1000; /* cgroup rate limit in Mbps */
+int rate = 1000; /* can grow if rate conserving is enabled */
+int dur = 1;
+bool stats_flag;
+bool loopback_flag;
+bool debugFlag;
+bool work_conserving_flag;
+bool no_cn_flag;
+bool edt_flag;
+
+static void Usage(void);
+static void read_trace_pipe2(void);
+static void do_error(char *msg, bool errno_flag);
+
+#define DEBUGFS "/sys/kernel/debug/tracing/"
+
+static struct bpf_program *bpf_prog;
+static struct bpf_object *obj;
+static int queue_stats_fd;
+
+static void read_trace_pipe2(void)
+{
+ int trace_fd;
+ FILE *outf;
+ char *outFname = "hbm_out.log";
+
+ trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
+ if (trace_fd < 0) {
+ printf("Error opening trace_pipe\n");
+ return;
+ }
+
+// Future support of ingress
+// if (!outFlag)
+// outFname = "hbm_in.log";
+ outf = fopen(outFname, "w");
+
+ if (outf == NULL)
+ printf("Error creating %s\n", outFname);
+
+ while (1) {
+ static char buf[4097];
+ ssize_t sz;
+
+ sz = read(trace_fd, buf, sizeof(buf) - 1);
+ if (sz > 0) {
+ buf[sz] = 0;
+ puts(buf);
+ if (outf != NULL) {
+ fprintf(outf, "%s\n", buf);
+ fflush(outf);
+ }
+ }
+ }
+}
+
+static void do_error(char *msg, bool errno_flag)
+{
+ if (errno_flag)
+ printf("ERROR: %s, errno: %d\n", msg, errno);
+ else
+ printf("ERROR: %s\n", msg);
+ exit(1);
+}
+
+static int prog_load(char *prog)
+{
+ struct bpf_program *pos;
+ const char *sec_name;
+
+ obj = bpf_object__open_file(prog, NULL);
+ if (libbpf_get_error(obj)) {
+ printf("ERROR: opening BPF object file failed\n");
+ return 1;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("ERROR: loading BPF object file failed\n");
+ goto err;
+ }
+
+ bpf_object__for_each_program(pos, obj) {
+ sec_name = bpf_program__section_name(pos);
+ if (sec_name && !strcmp(sec_name, "cgroup_skb/egress")) {
+ bpf_prog = pos;
+ break;
+ }
+ }
+ if (!bpf_prog) {
+ printf("ERROR: finding a prog in obj file failed\n");
+ goto err;
+ }
+
+ queue_stats_fd = bpf_object__find_map_fd_by_name(obj, "queue_stats");
+ if (queue_stats_fd < 0) {
+ printf("ERROR: finding a map in obj file failed\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ bpf_object__close(obj);
+ return 1;
+}
+
+static int run_bpf_prog(char *prog, int cg_id)
+{
+ struct hbm_queue_stats qstats = {0};
+ char cg_dir[100], cg_pin_path[100];
+ struct bpf_link *link = NULL;
+ int key = 0;
+ int cg1 = 0;
+ int rc = 0;
+
+ sprintf(cg_dir, "/hbm%d", cg_id);
+ rc = prog_load(prog);
+ if (rc != 0)
+ return rc;
+
+ if (setup_cgroup_environment()) {
+ printf("ERROR: setting cgroup environment\n");
+ goto err;
+ }
+ cg1 = create_and_get_cgroup(cg_dir);
+ if (!cg1) {
+ printf("ERROR: create_and_get_cgroup\n");
+ goto err;
+ }
+ if (join_cgroup(cg_dir)) {
+ printf("ERROR: join_cgroup\n");
+ goto err;
+ }
+
+ qstats.rate = rate;
+ qstats.stats = stats_flag ? 1 : 0;
+ qstats.loopback = loopback_flag ? 1 : 0;
+ qstats.no_cn = no_cn_flag ? 1 : 0;
+ if (bpf_map_update_elem(queue_stats_fd, &key, &qstats, BPF_ANY)) {
+ printf("ERROR: Could not update map element\n");
+ goto err;
+ }
+
+ if (!outFlag)
+ bpf_program__set_expected_attach_type(bpf_prog, BPF_CGROUP_INET_INGRESS);
+
+ link = bpf_program__attach_cgroup(bpf_prog, cg1);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach_cgroup failed\n");
+ goto err;
+ }
+
+ sprintf(cg_pin_path, "/sys/fs/bpf/hbm%d", cg_id);
+ rc = bpf_link__pin(link, cg_pin_path);
+ if (rc < 0) {
+ printf("ERROR: bpf_link__pin failed: %d\n", rc);
+ goto err;
+ }
+
+ if (work_conserving_flag) {
+ struct timeval t0, t_last, t_new;
+ FILE *fin;
+ unsigned long long last_eth_tx_bytes, new_eth_tx_bytes;
+ signed long long last_cg_tx_bytes, new_cg_tx_bytes;
+ signed long long delta_time, delta_bytes, delta_rate;
+ int delta_ms;
+#define DELTA_RATE_CHECK 10000 /* in us */
+#define RATE_THRESHOLD 9500000000 /* 9.5 Gbps */
+
+ bpf_map_lookup_elem(queue_stats_fd, &key, &qstats);
+ if (gettimeofday(&t0, NULL) < 0)
+ do_error("gettimeofday failed", true);
+ t_last = t0;
+ fin = fopen("/sys/class/net/eth0/statistics/tx_bytes", "r");
+ if (fin == NULL)
+ do_error("fopen of tx_bytes failed", true);
+ if (fscanf(fin, "%llu", &last_eth_tx_bytes) != 1)
+ do_error("fscanf fails", false);
+ fclose(fin);
+ last_cg_tx_bytes = qstats.bytes_total;
+ while (true) {
+ usleep(DELTA_RATE_CHECK);
+ if (gettimeofday(&t_new, NULL) < 0)
+ do_error("gettimeofday failed", true);
+ delta_ms = (t_new.tv_sec - t0.tv_sec) * 1000 +
+ (t_new.tv_usec - t0.tv_usec)/1000;
+ if (delta_ms > dur * 1000)
+ break;
+ delta_time = (t_new.tv_sec - t_last.tv_sec) * 1000000 +
+ (t_new.tv_usec - t_last.tv_usec);
+ if (delta_time == 0)
+ continue;
+ t_last = t_new;
+ fin = fopen("/sys/class/net/eth0/statistics/tx_bytes",
+ "r");
+ if (fin == NULL)
+ do_error("fopen of tx_bytes failed", true);
+ if (fscanf(fin, "%llu", &new_eth_tx_bytes) != 1)
+ do_error("fscanf fails", false);
+ fclose(fin);
+ printf(" new_eth_tx_bytes:%llu\n",
+ new_eth_tx_bytes);
+ bpf_map_lookup_elem(queue_stats_fd, &key, &qstats);
+ new_cg_tx_bytes = qstats.bytes_total;
+ delta_bytes = new_eth_tx_bytes - last_eth_tx_bytes;
+ last_eth_tx_bytes = new_eth_tx_bytes;
+ delta_rate = (delta_bytes * 8000000) / delta_time;
+ printf("%5d - eth_rate:%.1fGbps cg_rate:%.3fGbps",
+ delta_ms, delta_rate/1000000000.0,
+ rate/1000.0);
+ if (delta_rate < RATE_THRESHOLD) {
+ /* can increase cgroup rate limit, but first
+ * check if we are using the current limit.
+ * Currently increasing by 6.25%, unknown
+ * if that is the optimal rate.
+ */
+ int rate_diff100;
+
+ delta_bytes = new_cg_tx_bytes -
+ last_cg_tx_bytes;
+ last_cg_tx_bytes = new_cg_tx_bytes;
+ delta_rate = (delta_bytes * 8000000) /
+ delta_time;
+ printf(" rate:%.3fGbps",
+ delta_rate/1000000000.0);
+ rate_diff100 = (((long long)rate)*1000000 -
+ delta_rate) * 100 /
+ (((long long) rate) * 1000000);
+ printf(" rdiff:%d", rate_diff100);
+ if (rate_diff100 <= 3) {
+ rate += (rate >> 4);
+ if (rate > RATE_THRESHOLD / 1000000)
+ rate = RATE_THRESHOLD / 1000000;
+ qstats.rate = rate;
+ printf(" INC\n");
+ } else {
+ printf("\n");
+ }
+ } else {
+ /* Need to decrease cgroup rate limit.
+ * Currently decreasing by 12.5%, unknown
+ * if that is optimal
+ */
+ printf(" DEC\n");
+ rate -= (rate >> 3);
+ if (rate < minRate)
+ rate = minRate;
+ qstats.rate = rate;
+ }
+ if (bpf_map_update_elem(queue_stats_fd, &key, &qstats, BPF_ANY))
+ do_error("update map element fails", false);
+ }
+ } else {
+ sleep(dur);
+ }
+ // Get stats!
+ if (stats_flag && bpf_map_lookup_elem(queue_stats_fd, &key, &qstats)) {
+ char fname[100];
+ FILE *fout;
+
+ if (!outFlag)
+ sprintf(fname, "hbm.%d.in", cg_id);
+ else
+ sprintf(fname, "hbm.%d.out", cg_id);
+ fout = fopen(fname, "w");
+ fprintf(fout, "id:%d\n", cg_id);
+ fprintf(fout, "ERROR: Could not lookup queue_stats\n");
+ fclose(fout);
+ } else if (stats_flag && qstats.lastPacketTime >
+ qstats.firstPacketTime) {
+ long long delta_us = (qstats.lastPacketTime -
+ qstats.firstPacketTime)/1000;
+ unsigned int rate_mbps = ((qstats.bytes_total -
+ qstats.bytes_dropped) * 8 /
+ delta_us);
+ double percent_pkts, percent_bytes;
+ char fname[100];
+ FILE *fout;
+ int k;
+ static const char *returnValNames[] = {
+ "DROP_PKT",
+ "ALLOW_PKT",
+ "DROP_PKT_CWR",
+ "ALLOW_PKT_CWR"
+ };
+#define RET_VAL_COUNT 4
+
+// Future support of ingress
+// if (!outFlag)
+// sprintf(fname, "hbm.%d.in", cg_id);
+// else
+ sprintf(fname, "hbm.%d.out", cg_id);
+ fout = fopen(fname, "w");
+ fprintf(fout, "id:%d\n", cg_id);
+ fprintf(fout, "rate_mbps:%d\n", rate_mbps);
+ fprintf(fout, "duration:%.1f secs\n",
+ (qstats.lastPacketTime - qstats.firstPacketTime) /
+ 1000000000.0);
+ fprintf(fout, "packets:%d\n", (int)qstats.pkts_total);
+ fprintf(fout, "bytes_MB:%d\n", (int)(qstats.bytes_total /
+ 1000000));
+ fprintf(fout, "pkts_dropped:%d\n", (int)qstats.pkts_dropped);
+ fprintf(fout, "bytes_dropped_MB:%d\n",
+ (int)(qstats.bytes_dropped /
+ 1000000));
+ // Marked Pkts and Bytes
+ percent_pkts = (qstats.pkts_marked * 100.0) /
+ (qstats.pkts_total + 1);
+ percent_bytes = (qstats.bytes_marked * 100.0) /
+ (qstats.bytes_total + 1);
+ fprintf(fout, "pkts_marked_percent:%6.2f\n", percent_pkts);
+ fprintf(fout, "bytes_marked_percent:%6.2f\n", percent_bytes);
+
+ // Dropped Pkts and Bytes
+ percent_pkts = (qstats.pkts_dropped * 100.0) /
+ (qstats.pkts_total + 1);
+ percent_bytes = (qstats.bytes_dropped * 100.0) /
+ (qstats.bytes_total + 1);
+ fprintf(fout, "pkts_dropped_percent:%6.2f\n", percent_pkts);
+ fprintf(fout, "bytes_dropped_percent:%6.2f\n", percent_bytes);
+
+ // ECN CE markings
+ percent_pkts = (qstats.pkts_ecn_ce * 100.0) /
+ (qstats.pkts_total + 1);
+ fprintf(fout, "pkts_ecn_ce:%6.2f (%d)\n", percent_pkts,
+ (int)qstats.pkts_ecn_ce);
+
+ // Average cwnd
+ fprintf(fout, "avg cwnd:%d\n",
+ (int)(qstats.sum_cwnd / (qstats.sum_cwnd_cnt + 1)));
+ // Average rtt
+ fprintf(fout, "avg rtt:%d\n",
+ (int)(qstats.sum_rtt / (qstats.pkts_total + 1)));
+ // Average credit
+ if (edt_flag)
+ fprintf(fout, "avg credit_ms:%.03f\n",
+ (qstats.sum_credit /
+ (qstats.pkts_total + 1.0)) / 1000000.0);
+ else
+ fprintf(fout, "avg credit:%d\n",
+ (int)(qstats.sum_credit /
+ (1500 * ((int)qstats.pkts_total ) + 1)));
+
+ // Return values stats
+ for (k = 0; k < RET_VAL_COUNT; k++) {
+ percent_pkts = (qstats.returnValCount[k] * 100.0) /
+ (qstats.pkts_total + 1);
+ fprintf(fout, "%s:%6.2f (%d)\n", returnValNames[k],
+ percent_pkts, (int)qstats.returnValCount[k]);
+ }
+ fclose(fout);
+ }
+
+ if (debugFlag)
+ read_trace_pipe2();
+ goto cleanup;
+
+err:
+ rc = 1;
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+
+ if (cg1 > 0)
+ close(cg1);
+
+ if (rc != 0)
+ cleanup_cgroup_environment();
+ return rc;
+}
+
+static void Usage(void)
+{
+ printf("This program loads a cgroup skb BPF program to enforce\n"
+ "cgroup output (egress) bandwidth limits.\n\n"
+ "USAGE: hbm [-o] [-d] [-l] [-n <id>] [--no_cn] [-r <rate>]\n"
+ " [-s] [-t <secs>] [-w] [-h] [prog]\n"
+ " Where:\n"
+ " -o indicates egress direction (default)\n"
+ " -d print BPF trace debug buffer\n"
+ " --edt use fq's Earliest Departure Time\n"
+ " -l also limit flows using loopback\n"
+ " -n <#> to create cgroup \"/hbm#\" and attach prog\n"
+ " Default is /hbm1\n"
+ " --no_cn disable CN notifications\n"
+ " -r <rate> Rate in Mbps\n"
+ " -s Update HBM stats\n"
+ " -t <time> Exit after specified seconds (default is 0)\n"
+ " -w Work conserving flag. cgroup can increase\n"
+ " bandwidth beyond the rate limit specified\n"
+ " while there is available bandwidth. Current\n"
+ " implementation assumes there is only eth0\n"
+ " but can be extended to support multiple NICs\n"
+ " -h print this info\n"
+ " prog BPF program file name. Name defaults to\n"
+ " hbm_out_kern.o\n");
+}
+
+int main(int argc, char **argv)
+{
+ char *prog = "hbm_out_kern.o";
+ int k;
+ int cg_id = 1;
+ char *optstring = "iodln:r:st:wh";
+ struct option loptions[] = {
+ {"no_cn", 0, NULL, 1},
+ {"edt", 0, NULL, 2},
+ {NULL, 0, NULL, 0}
+ };
+
+ while ((k = getopt_long(argc, argv, optstring, loptions, NULL)) != -1) {
+ switch (k) {
+ case 1:
+ no_cn_flag = true;
+ break;
+ case 2:
+ prog = "hbm_edt_kern.o";
+ edt_flag = true;
+ break;
+ case 'o':
+ break;
+ case 'd':
+ debugFlag = true;
+ break;
+ case 'l':
+ loopback_flag = true;
+ break;
+ case 'n':
+ cg_id = atoi(optarg);
+ break;
+ case 'r':
+ minRate = atoi(optarg) * 1.024;
+ rate = minRate;
+ break;
+ case 's':
+ stats_flag = true;
+ break;
+ case 't':
+ dur = atoi(optarg);
+ break;
+ case 'w':
+ work_conserving_flag = true;
+ break;
+ case '?':
+ if (optopt == 'n' || optopt == 'r' || optopt == 't')
+ fprintf(stderr,
+ "Option -%c requires an argument.\n\n",
+ optopt);
+ case 'h':
+ __fallthrough;
+ default:
+ Usage();
+ return 0;
+ }
+ }
+
+ if (optind < argc)
+ prog = argv[optind];
+ printf("HBM prog: %s\n", prog != NULL ? prog : "NULL");
+
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+ return run_bpf_prog(prog, cg_id);
+}
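
The work-conserving loop above nudges the cgroup limit with cheap shifts: grow by rate >> 4 (~6.25%) when the cgroup is within 3% of its current limit and the NIC is below RATE_THRESHOLD, shrink by rate >> 3 (~12.5%) otherwise. A minimal stand-alone sketch of just that policy (function and variable names are illustrative, not part of the sample):

#include <stdio.h>

#define RATE_THRESHOLD 9500000000ULL	/* 9.5 Gbps, as in hbm.c */

/* rate/min_rate in Mbps; rate_diff100 = percent gap between the configured
 * limit and the measured cgroup rate; nic_saturated = NIC above threshold.
 */
static int adjust_rate(int rate, int min_rate, int rate_diff100,
		       int nic_saturated)
{
	if (!nic_saturated) {
		if (rate_diff100 <= 3) {	/* cgroup is using its limit */
			rate += rate >> 4;	/* increase by ~6.25% */
			if (rate > RATE_THRESHOLD / 1000000)
				rate = RATE_THRESHOLD / 1000000;
		}
	} else {
		rate -= rate >> 3;		/* decrease by ~12.5% */
		if (rate < min_rate)
			rate = min_rate;
	}
	return rate;
}

int main(void)
{
	printf("1000 -> %d Mbps\n", adjust_rate(1000, 1000, 2, 0)); /* 1062 */
	return 0;
}
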
diff --git a/samples/bpf/hbm.h b/samples/bpf/hbm.h
new file mode 100644
index 000000000..f0963ed6a
--- /dev/null
+++ b/samples/bpf/hbm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Include file for Host Bandwidth Management (HBM) programs
+ */
+struct hbm_vqueue {
+ struct bpf_spin_lock lock;
+ /* 4 byte hole */
+ unsigned long long lasttime; /* In ns */
+ int credit; /* In bytes */
+ unsigned int rate; /* In bytes per NS << 20 */
+};
+
+struct hbm_queue_stats {
+ unsigned long rate; /* in Mbps*/
+ unsigned long stats:1, /* get HBM stats (marked, dropped,..) */
+ loopback:1, /* also limit flows using loopback */
+ no_cn:1; /* do not use cn flags */
+ unsigned long long pkts_marked;
+ unsigned long long bytes_marked;
+ unsigned long long pkts_dropped;
+ unsigned long long bytes_dropped;
+ unsigned long long pkts_total;
+ unsigned long long bytes_total;
+ unsigned long long firstPacketTime;
+ unsigned long long lastPacketTime;
+ unsigned long long pkts_ecn_ce;
+ unsigned long long returnValCount[4];
+ unsigned long long sum_cwnd;
+ unsigned long long sum_rtt;
+ unsigned long long sum_cwnd_cnt;
+ long long sum_credit;
+};
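
The two rate fields use different units: hbm_queue_stats.rate is plain Mbps, while hbm_vqueue.rate is bytes-per-ns scaled by 2^20. The kernel programs convert with a multiply by 128; a short sketch of why that constant works (the exact factor would be 131.072):

#include <stdio.h>

/* Mbps -> bytes/ns: rate * 1e6 / 8 / 1e9 = rate * 1.25e-4.
 * Scaled by 2^20: rate * 131.072, approximated as rate * 128
 * (a power of two, ~2.3% below the exact value).
 */
static unsigned int mbps_to_vqueue_rate(unsigned long mbps)
{
	return mbps * 128;
}

int main(void)
{
	printf("1000 Mbps -> vqueue rate %u\n", mbps_to_vqueue_rate(1000));
	return 0;
}
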
diff --git a/samples/bpf/hbm_edt_kern.c b/samples/bpf/hbm_edt_kern.c
new file mode 100644
index 000000000..a65b677ac
--- /dev/null
+++ b/samples/bpf/hbm_edt_kern.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Sample Host Bandwidth Manager (HBM) BPF program.
+ *
+ * A cgroup skb BPF egress program to limit cgroup output bandwidth.
+ * It uses a modified virtual token bucket queue to limit average
+ * egress bandwidth. The implementation uses credits instead of tokens.
+ * Negative credits imply that queueing would have happened (this is
+ * a virtual queue, so no queueing is done by it). However, queueing
+ * may occur at the actual qdisc (which is not used for rate limiting).
+ *
+ * This implementation uses 3 thresholds, one to start marking packets and
+ * the other two to drop packets:
+ * CREDIT
+ * - <--------------------------|------------------------> +
+ * | | | 0
+ * | Large pkt |
+ * | drop thresh |
+ * Small pkt drop Mark threshold
+ * thresh
+ *
+ * The effect of marking depends on the type of packet:
+ * a) If the packet is ECN enabled and it is a TCP packet, then the packet
+ * is ECN marked.
+ * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
+ * to reduce the congestion window. The current implementation uses a linear
+ * distribution (0% probability at marking threshold, 100% probability
+ * at drop threshold).
+ * c) If the packet is not a TCP packet, then it is dropped.
+ *
+ * If the credit is below the drop threshold, the packet is dropped. If it
+ * is a TCP packet, then it also calls tcp_cwr since packets dropped
+ * by a cgroup skb BPF program do not automatically trigger a call to
+ * tcp_cwr in the current kernel code.
+ *
+ * This BPF program actually uses 2 drop thresholds, one threshold
+ * for larger packets (>= 120 bytes) and another for smaller packets. This
+ * protects smaller packets such as SYNs, ACKs, etc.
+ *
+ * The default bandwidth limit is set at 1Gbps but this can be changed by
+ * a user program through a shared BPF map. In addition, by default this BPF
+ * program does not limit connections using loopback. This behavior can be
+ * overwritten by the user program. There is also an option to calculate
+ * some statistics, such as percent of packets marked or dropped, which
+ * a user program, such as hbm, can access.
+ */
+
+#include "hbm_kern.h"
+
+SEC("cgroup_skb/egress")
+int _hbm_out_cg(struct __sk_buff *skb)
+{
+ long long delta = 0, delta_send;
+ unsigned long long curtime, sendtime;
+ struct hbm_queue_stats *qsp = NULL;
+ unsigned int queue_index = 0;
+ bool congestion_flag = false;
+ bool ecn_ce_flag = false;
+ struct hbm_pkt_info pkti = {};
+ struct hbm_vqueue *qdp;
+ bool drop_flag = false;
+ bool cwr_flag = false;
+ int len = skb->len;
+ int rv = ALLOW_PKT;
+
+ qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);
+
+ // Check if we should ignore loopback traffic
+ if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
+ return ALLOW_PKT;
+
+ hbm_get_pkt_info(skb, &pkti);
+
+ // We may want to account for the length of headers (e.g. the ETH
+ // header plus overhead) in the len calculation, especially for
+ // GSO packets, but that is not done here yet.
+
+ qdp = bpf_get_local_storage(&queue_state, 0);
+ if (!qdp)
+ return ALLOW_PKT;
+ if (qdp->lasttime == 0)
+ hbm_init_edt_vqueue(qdp, 1024);
+
+ curtime = bpf_ktime_get_ns();
+
+ // Begin critical section
+ bpf_spin_lock(&qdp->lock);
+ delta = qdp->lasttime - curtime;
+ // bound bursts to 100us
+ if (delta < -BURST_SIZE_NS) {
+ // negative delta is a credit that allows bursts
+ qdp->lasttime = curtime - BURST_SIZE_NS;
+ delta = -BURST_SIZE_NS;
+ }
+ sendtime = qdp->lasttime;
+ delta_send = BYTES_TO_NS(len, qdp->rate);
+ __sync_add_and_fetch(&(qdp->lasttime), delta_send);
+ bpf_spin_unlock(&qdp->lock);
+ // End critical section
+
+ // Set EDT of packet
+ skb->tstamp = sendtime;
+
+ // Check if we should update rate
+ if (qsp != NULL && (qsp->rate * 128) != qdp->rate)
+ qdp->rate = qsp->rate * 128;
+
+ // Set flags (drop, congestion, cwr)
+ // last packet will be sent in the future, bound latency
+ if (delta > DROP_THRESH_NS || (delta > LARGE_PKT_DROP_THRESH_NS &&
+ len > LARGE_PKT_THRESH)) {
+ drop_flag = true;
+ if (pkti.is_tcp && pkti.ecn == 0)
+ cwr_flag = true;
+ } else if (delta > MARK_THRESH_NS) {
+ if (pkti.is_tcp)
+ congestion_flag = true;
+ else
+ drop_flag = true;
+ }
+
+ if (congestion_flag) {
+ if (bpf_skb_ecn_set_ce(skb)) {
+ ecn_ce_flag = true;
+ } else {
+ if (pkti.is_tcp) {
+ unsigned int rand = bpf_get_prandom_u32();
+
+ if (delta >= MARK_THRESH_NS +
+ (rand % MARK_REGION_SIZE_NS)) {
+ // Do congestion control
+ cwr_flag = true;
+ }
+ } else if (len > LARGE_PKT_THRESH) {
+ // Problem if too many small packets?
+ drop_flag = true;
+ congestion_flag = false;
+ }
+ }
+ }
+
+ if (pkti.is_tcp && drop_flag && pkti.packets_out <= 1) {
+ drop_flag = false;
+ cwr_flag = true;
+ congestion_flag = false;
+ }
+
+ if (qsp != NULL && qsp->no_cn)
+ cwr_flag = false;
+
+ hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
+ cwr_flag, ecn_ce_flag, &pkti, (int) delta);
+
+ if (drop_flag) {
+ __sync_add_and_fetch(&(qdp->lasttime), -delta_send);
+ rv = DROP_PKT;
+ }
+
+ if (cwr_flag)
+ rv |= CWR;
+ return rv;
+}
+char _license[] SEC("license") = "GPL";
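
The EDT variant paces by pushing qdp->lasttime forward BYTES_TO_NS(len, rate) per packet and stamping that time into skb->tstamp; an fq qdisc on the device then honors it as an earliest departure time. A user-space sketch of the same arithmetic, using the definitions from hbm_kern.h:

#include <stdio.h>

/* BYTES_TO_NS(bytes, rate) from hbm_kern.h, with rate = Mbps * 128 */
static unsigned long long bytes_to_ns(unsigned long long bytes,
				      unsigned long long rate)
{
	return (bytes << 20) / rate;
}

int main(void)
{
	unsigned long long rate = 1000ULL * 128;	/* ~1 Gbps */

	/* one 1500-byte packet occupies ~12288 ns of virtual link time */
	printf("%llu ns per 1500B packet\n", bytes_to_ns(1500, rate));
	return 0;
}
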
diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h
new file mode 100644
index 000000000..1752a46a2
--- /dev/null
+++ b/samples/bpf/hbm_kern.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Include file for sample Host Bandwidth Manager (HBM) BPF programs
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include <net/inet_ecn.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include "hbm.h"
+
+#define DROP_PKT 0
+#define ALLOW_PKT 1
+#define TCP_ECN_OK 1
+#define CWR 2
+
+#ifndef HBM_DEBUG // Define HBM_DEBUG to enable debugging
+#undef bpf_printk
+#define bpf_printk(fmt, ...)
+#endif
+
+#define INITIAL_CREDIT_PACKETS 100
+#define MAX_BYTES_PER_PACKET 1500
+#define MARK_THRESH (40 * MAX_BYTES_PER_PACKET)
+#define DROP_THRESH (80 * 5 * MAX_BYTES_PER_PACKET)
+#define LARGE_PKT_DROP_THRESH (DROP_THRESH - (15 * MAX_BYTES_PER_PACKET))
+#define MARK_REGION_SIZE (LARGE_PKT_DROP_THRESH - MARK_THRESH)
+#define LARGE_PKT_THRESH 120
+#define MAX_CREDIT (100 * MAX_BYTES_PER_PACKET)
+#define INIT_CREDIT (INITIAL_CREDIT_PACKETS * MAX_BYTES_PER_PACKET)
+
+// Time-based accounting for fq's EDT
+#define BURST_SIZE_NS 100000 // 100us
+#define MARK_THRESH_NS 50000 // 50us
+#define DROP_THRESH_NS 500000 // 500us
+// Reserve 20us of queuing for small packets (less than 120 bytes)
+#define LARGE_PKT_DROP_THRESH_NS (DROP_THRESH_NS - 20000)
+#define MARK_REGION_SIZE_NS (LARGE_PKT_DROP_THRESH_NS - MARK_THRESH_NS)
+
+// rate in bytes per ns << 20
+#define CREDIT_PER_NS(delta, rate) ((((u64)(delta)) * (rate)) >> 20)
+#define BYTES_PER_NS(delta, rate) ((((u64)(delta)) * (rate)) >> 20)
+#define BYTES_TO_NS(bytes, rate) div64_u64(((u64)(bytes)) << 20, (u64)(rate))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, struct bpf_cgroup_storage_key);
+ __type(value, struct hbm_vqueue);
+} queue_state SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, struct hbm_queue_stats);
+} queue_stats SEC(".maps");
+
+struct hbm_pkt_info {
+ int cwnd;
+ int rtt;
+ int packets_out;
+ bool is_ip;
+ bool is_tcp;
+ short ecn;
+};
+
+static int get_tcp_info(struct __sk_buff *skb, struct hbm_pkt_info *pkti)
+{
+ struct bpf_sock *sk;
+ struct bpf_tcp_sock *tp;
+
+ sk = skb->sk;
+ if (sk) {
+ sk = bpf_sk_fullsock(sk);
+ if (sk) {
+ if (sk->protocol == IPPROTO_TCP) {
+ tp = bpf_tcp_sock(sk);
+ if (tp) {
+ pkti->cwnd = tp->snd_cwnd;
+ pkti->rtt = tp->srtt_us >> 3;
+ pkti->packets_out = tp->packets_out;
+ return 0;
+ }
+ }
+ }
+ }
+ pkti->cwnd = 0;
+ pkti->rtt = 0;
+ pkti->packets_out = 0;
+ return 1;
+}
+
+static void hbm_get_pkt_info(struct __sk_buff *skb,
+ struct hbm_pkt_info *pkti)
+{
+ struct iphdr iph;
+ struct ipv6hdr *ip6h;
+
+ pkti->cwnd = 0;
+ pkti->rtt = 0;
+ bpf_skb_load_bytes(skb, 0, &iph, 12);
+ if (iph.version == 6) {
+ ip6h = (struct ipv6hdr *)&iph;
+ pkti->is_ip = true;
+ pkti->is_tcp = (ip6h->nexthdr == 6);
+ pkti->ecn = (ip6h->flow_lbl[0] >> 4) & INET_ECN_MASK;
+ } else if (iph.version == 4) {
+ pkti->is_ip = true;
+ pkti->is_tcp = (iph.protocol == 6);
+ pkti->ecn = iph.tos & INET_ECN_MASK;
+ } else {
+ pkti->is_ip = false;
+ pkti->is_tcp = false;
+ pkti->ecn = 0;
+ }
+ if (pkti->is_tcp)
+ get_tcp_info(skb, pkti);
+}
+
+static __always_inline void hbm_init_vqueue(struct hbm_vqueue *qdp, int rate)
+{
+ bpf_printk("Initializing queue_state, rate:%d\n", rate * 128);
+ qdp->lasttime = bpf_ktime_get_ns();
+ qdp->credit = INIT_CREDIT;
+ qdp->rate = rate * 128;
+}
+
+static __always_inline void hbm_init_edt_vqueue(struct hbm_vqueue *qdp,
+ int rate)
+{
+ unsigned long long curtime;
+
+ curtime = bpf_ktime_get_ns();
+ bpf_printk("Initializing queue_state, rate:%d\n", rate * 128);
+ qdp->lasttime = curtime - BURST_SIZE_NS; // support initial burst
+ qdp->credit = 0; // not used
+ qdp->rate = rate * 128;
+}
+
+static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
+ int len,
+ unsigned long long curtime,
+ bool congestion_flag,
+ bool drop_flag,
+ bool cwr_flag,
+ bool ecn_ce_flag,
+ struct hbm_pkt_info *pkti,
+ int credit)
+{
+ int rv = ALLOW_PKT;
+
+ if (qsp != NULL) {
+ // Following is needed for work conserving
+ __sync_add_and_fetch(&(qsp->bytes_total), len);
+ if (qsp->stats) {
+ // Optionally update statistics
+ if (qsp->firstPacketTime == 0)
+ qsp->firstPacketTime = curtime;
+ qsp->lastPacketTime = curtime;
+ __sync_add_and_fetch(&(qsp->pkts_total), 1);
+ if (congestion_flag) {
+ __sync_add_and_fetch(&(qsp->pkts_marked), 1);
+ __sync_add_and_fetch(&(qsp->bytes_marked), len);
+ }
+ if (drop_flag) {
+ __sync_add_and_fetch(&(qsp->pkts_dropped), 1);
+ __sync_add_and_fetch(&(qsp->bytes_dropped),
+ len);
+ }
+ if (ecn_ce_flag)
+ __sync_add_and_fetch(&(qsp->pkts_ecn_ce), 1);
+ if (pkti->cwnd) {
+ __sync_add_and_fetch(&(qsp->sum_cwnd),
+ pkti->cwnd);
+ __sync_add_and_fetch(&(qsp->sum_cwnd_cnt), 1);
+ }
+ if (pkti->rtt)
+ __sync_add_and_fetch(&(qsp->sum_rtt),
+ pkti->rtt);
+ __sync_add_and_fetch(&(qsp->sum_credit), credit);
+
+ if (drop_flag)
+ rv = DROP_PKT;
+ if (cwr_flag)
+ rv |= CWR;
+ if (rv == DROP_PKT)
+ __sync_add_and_fetch(&(qsp->returnValCount[0]),
+ 1);
+ else if (rv == ALLOW_PKT)
+ __sync_add_and_fetch(&(qsp->returnValCount[1]),
+ 1);
+ else if (rv == 2)
+ __sync_add_and_fetch(&(qsp->returnValCount[2]),
+ 1);
+ else if (rv == 3)
+ __sync_add_and_fetch(&(qsp->returnValCount[3]),
+ 1);
+ }
+ }
+}
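
hbm_update_stats() recomputes the program's return value so that the returnValCount[] indexes line up with the four names printed by hbm.c. A tiny sketch of the encoding (bit 0 = allow, bit 1 = CWR):

#include <stdio.h>

static const char *decode_rv(int rv)
{
	switch (rv) {
	case 0: return "DROP_PKT";
	case 1: return "ALLOW_PKT";
	case 2: return "DROP_PKT_CWR";
	case 3: return "ALLOW_PKT_CWR";
	}
	return "UNKNOWN";
}

int main(void)
{
	for (int rv = 0; rv < 4; rv++)
		printf("%d -> %s\n", rv, decode_rv(rv));
	return 0;
}
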
diff --git a/samples/bpf/hbm_out_kern.c b/samples/bpf/hbm_out_kern.c
new file mode 100644
index 000000000..829934bd4
--- /dev/null
+++ b/samples/bpf/hbm_out_kern.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Sample Host Bandwidth Manager (HBM) BPF program.
+ *
+ * A cgroup skb BPF egress program to limit cgroup output bandwidth.
+ * It uses a modified virtual token bucket queue to limit average
+ * egress bandwidth. The implementation uses credits instead of tokens.
+ * Negative credits imply that queueing would have happened (this is
+ * a virtual queue, so no queueing is done by it). However, queueing
+ * may occur at the actual qdisc (which is not used for rate limiting).
+ *
+ * This implementation uses 3 thresholds, one to start marking packets and
+ * the other two to drop packets:
+ * CREDIT
+ * - <--------------------------|------------------------> +
+ * | | | 0
+ * | Large pkt |
+ * | drop thresh |
+ * Small pkt drop Mark threshold
+ * thresh
+ *
+ * The effect of marking depends on the type of packet:
+ * a) If the packet is ECN enabled and it is a TCP packet, then the packet
+ * is ECN marked.
+ * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
+ * to reduce the congestion window. The current implementation uses a linear
+ * distribution (0% probability at marking threshold, 100% probability
+ * at drop threshold).
+ * c) If the packet is not a TCP packet, then it is dropped.
+ *
+ * If the credit is below the drop threshold, the packet is dropped. If it
+ * is a TCP packet, then it also calls tcp_cwr since packets dropped
+ * by a cgroup skb BPF program do not automatically trigger a call to
+ * tcp_cwr in the current kernel code.
+ *
+ * This BPF program actually uses 2 drop thresholds, one threshold
+ * for larger packets (>= 120 bytes) and another for smaller packets. This
+ * protects smaller packets such as SYNs, ACKs, etc.
+ *
+ * The default bandwidth limit is set at 1Gbps but this can be changed by
+ * a user program through a shared BPF map. In addition, by default this BPF
+ * program does not limit connections using loopback. This behavior can be
+ * overwritten by the user program. There is also an option to calculate
+ * some statistics, such as percent of packets marked or dropped, which
+ * the user program can access.
+ *
+ * A later patch provides such a program (hbm.c).
+ */
+
+#include "hbm_kern.h"
+
+SEC("cgroup_skb/egress")
+int _hbm_out_cg(struct __sk_buff *skb)
+{
+ struct hbm_pkt_info pkti;
+ int len = skb->len;
+ unsigned int queue_index = 0;
+ unsigned long long curtime;
+ int credit;
+ signed long long delta = 0, new_credit;
+ int max_credit = MAX_CREDIT;
+ bool congestion_flag = false;
+ bool drop_flag = false;
+ bool cwr_flag = false;
+ bool ecn_ce_flag = false;
+ struct hbm_vqueue *qdp;
+ struct hbm_queue_stats *qsp = NULL;
+ int rv = ALLOW_PKT;
+
+ qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);
+ if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
+ return ALLOW_PKT;
+
+ hbm_get_pkt_info(skb, &pkti);
+
+ // We may want to account for the length of headers (e.g. the ETH
+ // header plus overhead) in the len calculation, especially for
+ // GSO packets, but that is not done here yet.
+
+ qdp = bpf_get_local_storage(&queue_state, 0);
+ if (!qdp)
+ return ALLOW_PKT;
+ else if (qdp->lasttime == 0)
+ hbm_init_vqueue(qdp, 1024);
+
+ curtime = bpf_ktime_get_ns();
+
+ // Begin critical section
+ bpf_spin_lock(&qdp->lock);
+ credit = qdp->credit;
+ delta = curtime - qdp->lasttime;
+ /* delta < 0 implies that another process with a curtime greater
+ * than ours beat us to the critical section and already added
+ * the new credit, so we should not add it ourselves
+ */
+ if (delta > 0) {
+ qdp->lasttime = curtime;
+ new_credit = credit + CREDIT_PER_NS(delta, qdp->rate);
+ if (new_credit > MAX_CREDIT)
+ credit = MAX_CREDIT;
+ else
+ credit = new_credit;
+ }
+ credit -= len;
+ qdp->credit = credit;
+ bpf_spin_unlock(&qdp->lock);
+ // End critical section
+
+ // Check if we should update rate
+ if (qsp != NULL && (qsp->rate * 128) != qdp->rate) {
+ qdp->rate = qsp->rate * 128;
+ bpf_printk("Updating rate: %d (1sec:%llu bits)\n",
+ (int)qdp->rate,
+ CREDIT_PER_NS(1000000000, qdp->rate) * 8);
+ }
+
+ // Set flags (drop, congestion, cwr)
+ // Dropping => we are congested, so ignore congestion flag
+ if (credit < -DROP_THRESH ||
+ (len > LARGE_PKT_THRESH && credit < -LARGE_PKT_DROP_THRESH)) {
+ // Very congested, set drop packet
+ drop_flag = true;
+ if (pkti.ecn)
+ congestion_flag = true;
+ else if (pkti.is_tcp)
+ cwr_flag = true;
+ } else if (credit < 0) {
+ // Congested, set congestion flag
+ if (pkti.ecn || pkti.is_tcp) {
+ if (credit < -MARK_THRESH)
+ congestion_flag = true;
+ else
+ congestion_flag = false;
+ } else {
+ congestion_flag = true;
+ }
+ }
+
+ if (congestion_flag) {
+ if (bpf_skb_ecn_set_ce(skb)) {
+ ecn_ce_flag = true;
+ } else {
+ if (pkti.is_tcp) {
+ unsigned int rand = bpf_get_prandom_u32();
+
+ if (-credit >= MARK_THRESH +
+ (rand % MARK_REGION_SIZE)) {
+ // Do congestion control
+ cwr_flag = true;
+ }
+ } else if (len > LARGE_PKT_THRESH) {
+ // Problem if too many small packets?
+ drop_flag = true;
+ }
+ }
+ }
+
+ if (qsp != NULL && qsp->no_cn)
+ cwr_flag = false;
+
+ hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
+ cwr_flag, ecn_ce_flag, &pkti, credit);
+
+ if (drop_flag) {
+ __sync_add_and_fetch(&(qdp->credit), len);
+ rv = DROP_PKT;
+ }
+
+ if (cwr_flag)
+ rv |= CWR;
+ return rv;
+}
+char _license[] SEC("license") = "GPL";
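
Stripped of the BPF plumbing and locking, the credit update in the critical section above is a classic token bucket with credits instead of tokens. A user-space sketch under the same constants (single-threaded, so no lock; names mirror the BPF code):

#include <stdio.h>

#define MAX_BYTES_PER_PACKET 1500
#define MAX_CREDIT (100 * MAX_BYTES_PER_PACKET)
#define CREDIT_PER_NS(delta, rate) \
	((((unsigned long long)(delta)) * (rate)) >> 20)

static long long credit = MAX_CREDIT;	/* initial burst allowance */

static int send_pkt(int len, unsigned long long delta_ns, unsigned int rate)
{
	long long new_credit = credit + CREDIT_PER_NS(delta_ns, rate);

	credit = new_credit > MAX_CREDIT ? MAX_CREDIT : new_credit;
	credit -= len;
	return credit >= 0;	/* negative credit => virtual queueing */
}

int main(void)
{
	/* 1500B packet arriving 10us after the previous one, at ~1 Gbps */
	printf("allowed:%d credit:%lld\n",
	       send_pkt(1500, 10000, 1000 * 128), credit);
	return 0;
}
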
diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
new file mode 100644
index 000000000..9b1932310
--- /dev/null
+++ b/samples/bpf/ibumad_kern.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+
+/*
+ * ibumad BPF sample kernel side
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Copyright(c) 2018 Ira Weiny, Intel Corporation
+ */
+
+#define KBUILD_MODNAME "ibumad_count_pkts_by_class"
+#include <uapi/linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32); /* class; u32 required */
+ __type(value, u64); /* count of mads read */
+ __uint(max_entries, 256); /* Room for all Classes */
+} read_count SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32); /* class; u32 required */
+ __type(value, u64); /* count of mads written */
+ __uint(max_entries, 256); /* Room for all Classes */
+} write_count SEC(".maps");
+
+#undef DEBUG
+#ifndef DEBUG
+#undef bpf_printk
+#define bpf_printk(fmt, ...)
+#endif
+
+/* Taken from the current format defined in
+ * include/trace/events/ib_umad.h
+ * and
+ * /sys/kernel/debug/tracing/events/ib_umad/ib_umad_read/format
+ * /sys/kernel/debug/tracing/events/ib_umad/ib_umad_write/format
+ */
+struct ib_umad_rw_args {
+ u64 pad;
+ u8 port_num;
+ u8 sl;
+ u8 path_bits;
+ u8 grh_present;
+ u32 id;
+ u32 status;
+ u32 timeout_ms;
+ u32 retries;
+ u32 length;
+ u32 qpn;
+ u32 qkey;
+ u8 gid_index;
+ u8 hop_limit;
+ u16 lid;
+ u16 attr_id;
+ u16 pkey_index;
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ u32 flow_label;
+ u16 mad_status;
+ u16 class_specific;
+ u32 attr_mod;
+ u64 tid;
+ u8 gid[16];
+ u32 dev_index;
+ u8 traffic_class;
+};
+
+SEC("tracepoint/ib_umad/ib_umad_read_recv")
+int on_ib_umad_read_recv(struct ib_umad_rw_args *ctx)
+{
+ u64 zero = 0, *val;
+ u32 class = ctx->mgmt_class; /* must match the u32 map key */
+
+ bpf_printk("ib_umad read recv : class 0x%x\n", class);
+
+ val = bpf_map_lookup_elem(&read_count, &class);
+ if (!val) {
+ bpf_map_update_elem(&read_count, &class, &zero, BPF_NOEXIST);
+ val = bpf_map_lookup_elem(&read_count, &class);
+ if (!val)
+ return 0;
+ }
+
+ (*val) += 1;
+
+ return 0;
+}
+SEC("tracepoint/ib_umad/ib_umad_read_send")
+int on_ib_umad_read_send(struct ib_umad_rw_args *ctx)
+{
+ u64 zero = 0, *val;
+ u32 class = ctx->mgmt_class;
+
+ bpf_printk("ib_umad read send : class 0x%x\n", class);
+
+ val = bpf_map_lookup_elem(&read_count, &class);
+ if (!val) {
+ bpf_map_update_elem(&read_count, &class, &zero, BPF_NOEXIST);
+ val = bpf_map_lookup_elem(&read_count, &class);
+ if (!val)
+ return 0;
+ }
+
+ (*val) += 1;
+
+ return 0;
+}
+SEC("tracepoint/ib_umad/ib_umad_write")
+int on_ib_umad_write(struct ib_umad_rw_args *ctx)
+{
+ u64 zero = 0, *val;
+ u32 class = ctx->mgmt_class;
+
+ bpf_printk("ib_umad write : class 0x%x\n", class);
+
+ val = bpf_map_lookup_elem(&write_count, &class);
+ if (!val) {
+ bpf_map_update_elem(&write_count, &class, &zero, BPF_NOEXIST);
+ val = bpf_map_lookup_elem(&write_count, &class);
+ if (!val)
+ return 0;
+ }
+
+ (*val) += 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
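
The three handlers above repeat one idiom: look the key up, and on a miss do a BPF_NOEXIST insert followed by a re-lookup, so two CPUs racing to initialize the slot cannot clobber each other. A BPF-side sketch of the idiom factored out (assumes the same includes as this file; the atomic add is a hedged alternative to the non-atomic (*val) += 1):

static __always_inline void count_event(void *map, u32 key)
{
	u64 zero = 0, *val;

	val = bpf_map_lookup_elem(map, &key);
	if (!val) {
		/* losing this race is harmless: the element exists after */
		bpf_map_update_elem(map, &key, &zero, BPF_NOEXIST);
		val = bpf_map_lookup_elem(map, &key);
		if (!val)
			return;
	}
	__sync_fetch_and_add(val, 1);
}
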
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
new file mode 100644
index 000000000..d074c978a
--- /dev/null
+++ b/samples/bpf/ibumad_user.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+
+/*
+ * ibumad BPF sample user side
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Copyright(c) 2018 Ira Weiny, Intel Corporation
+ */
+
+#include <linux/bpf.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <limits.h>
+
+#include <getopt.h>
+#include <net/if.h>
+
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+#include <bpf/libbpf.h>
+
+static struct bpf_link *tp_links[3];
+static struct bpf_object *obj;
+static int map_fd[2];
+static int tp_cnt;
+
+static void dump_counts(int fd)
+{
+ __u32 key;
+ __u64 value;
+
+ for (key = 0; key < 256; key++) {
+ if (bpf_map_lookup_elem(fd, &key, &value)) {
+ printf("failed to read key %u\n", key);
+ continue;
+ }
+ if (value)
+ printf("0x%02x : %llu\n", key, value);
+ }
+}
+
+static void dump_all_counts(void)
+{
+ printf("Read 'Class : count'\n");
+ dump_counts(map_fd[0]);
+ printf("Write 'Class : count'\n");
+ dump_counts(map_fd[1]);
+}
+
+static void dump_exit(int sig)
+{
+ dump_all_counts();
+ /* Detach tracepoints */
+ while (tp_cnt)
+ bpf_link__destroy(tp_links[--tp_cnt]);
+
+ bpf_object__close(obj);
+ exit(0);
+}
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h'},
+ {"delay", required_argument, NULL, 'd'},
+ {NULL, 0, NULL, 0} /* getopt_long() requires a zeroed terminator */
+};
+
+static void usage(char *cmd)
+{
+ printf("eBPF test program to count packets from various IP addresses\n"
+ "Usage: %s <options>\n"
+ " --help, -h this menu\n"
+ " --delay, -d <delay> wait <delay> sec between prints [1 - 1000000]\n"
+ , cmd
+ );
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_program *prog;
+ unsigned long delay = 5;
+ char filename[256];
+ int longindex = 0;
+ int opt, err = -1;
+
+ while ((opt = getopt_long(argc, argv, "hd:rSw",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'd':
+ delay = strtoul(optarg, NULL, 0);
+ if (delay == ULONG_MAX || delay > 1000000) {
+ fprintf(stderr, "ERROR: invalid delay : %s\n",
+ optarg);
+ usage(argv[0]);
+ return 1;
+ }
+ break;
+ default:
+ case 'h':
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ /* Do one final dump when exiting */
+ signal(SIGINT, dump_exit);
+ signal(SIGTERM, dump_exit);
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return err;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "read_count");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "write_count");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ tp_links[tp_cnt] = bpf_program__attach(prog);
+ if (libbpf_get_error(tp_links[tp_cnt])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ tp_links[tp_cnt] = NULL;
+ goto cleanup;
+ }
+ tp_cnt++;
+ }
+
+ while (1) {
+ sleep(delay);
+ dump_all_counts();
+ }
+ err = 0;
+
+cleanup:
+ /* Detach tracepoints */
+ while (tp_cnt)
+ bpf_link__destroy(tp_links[--tp_cnt]);
+
+ bpf_object__close(obj);
+ return err;
+}
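
bpf_object__for_each_program() above attaches every program found in the object. When only one tracepoint is wanted, libbpf can also select it by C function name; a hedged sketch (error handling elided):

struct bpf_program *p;

p = bpf_object__find_program_by_name(obj, "on_ib_umad_write");
if (p)
	tp_links[tp_cnt++] = bpf_program__attach(p);
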
diff --git a/samples/bpf/lathist_kern.c b/samples/bpf/lathist_kern.c
new file mode 100644
index 000000000..4adfcbbe6
--- /dev/null
+++ b/samples/bpf/lathist_kern.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2015 BMW Car IT GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_ENTRIES 20
+#define MAX_CPU 4
+
+/* We need to stick to static allocated memory (an array instead of
+ * hash table) because managing dynamic memory from the
+ * trace_preempt_[on|off] tracepoints hooks is not supported.
+ */
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU);
+} my_map SEC(".maps");
+
+SEC("kprobe/trace_preempt_off")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ int cpu = bpf_get_smp_processor_id();
+ u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);
+
+ if (ts)
+ *ts = bpf_ktime_get_ns();
+
+ return 0;
+}
+
+static unsigned int log2(unsigned int v)
+{
+ unsigned int r;
+ unsigned int shift;
+
+ r = (v > 0xFFFF) << 4; v >>= r;
+ shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+ shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+ shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+ r |= (v >> 1);
+
+ return r;
+}
+
+static unsigned int log2l(unsigned long v)
+{
+ unsigned int hi = v >> 32;
+
+ if (hi)
+ return log2(hi) + 32;
+ else
+ return log2(v);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, long);
+ __uint(max_entries, MAX_CPU * MAX_ENTRIES);
+} my_lat SEC(".maps");
+
+SEC("kprobe/trace_preempt_on")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ u64 *ts, cur_ts, delta;
+ int key, cpu;
+ long *val;
+
+ cpu = bpf_get_smp_processor_id();
+ ts = bpf_map_lookup_elem(&my_map, &cpu);
+ if (!ts)
+ return 0;
+
+ cur_ts = bpf_ktime_get_ns();
+ delta = log2l(cur_ts - *ts);
+
+ if (delta > MAX_ENTRIES - 1)
+ delta = MAX_ENTRIES - 1;
+
+ key = cpu * MAX_ENTRIES + delta;
+ val = bpf_map_lookup_elem(&my_lat, &key);
+ if (val)
+ __sync_fetch_and_add((long *)val, 1);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
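
The branchless log2() above computes floor(log2(v)) with four range tests, which keeps the BPF verifier happy (no loops, no calls). The same function checks out in user space:

#include <assert.h>
#include <stdio.h>

static unsigned int log2u(unsigned int v)
{
	unsigned int r, shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

int main(void)
{
	assert(log2u(1) == 0);
	assert(log2u(4096) == 12);
	assert(log2u(8191) == 12);
	printf("floor(log2) checks passed\n");
	return 0;
}
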
diff --git a/samples/bpf/lathist_user.c b/samples/bpf/lathist_user.c
new file mode 100644
index 000000000..7d8ff2418
--- /dev/null
+++ b/samples/bpf/lathist_user.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2015 BMW Car IT GmbH
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+
+#define MAX_ENTRIES 20
+#define MAX_CPU 4
+#define MAX_STARS 40
+
+struct cpu_hist {
+ long data[MAX_ENTRIES];
+ long max;
+};
+
+static struct cpu_hist cpu_hist[MAX_CPU];
+
+static void stars(char *str, long val, long max, int width)
+{
+ int i;
+
+ for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
+ str[i] = '*';
+ if (val > max)
+ str[i - 1] = '+';
+ str[i] = '\0';
+}
+
+static void print_hist(void)
+{
+ char starstr[MAX_STARS];
+ struct cpu_hist *hist;
+ int i, j;
+
+ /* clear screen */
+ printf("\033[2J");
+
+ for (j = 0; j < MAX_CPU; j++) {
+ hist = &cpu_hist[j];
+
+ /* ignore CPUs without data (maybe offline?) */
+ if (hist->max == 0)
+ continue;
+
+ printf("CPU %d\n", j);
+ printf(" latency : count distribution\n");
+ for (i = 1; i <= MAX_ENTRIES; i++) {
+ stars(starstr, hist->data[i - 1], hist->max, MAX_STARS);
+ printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
+ (1l << i) >> 1, (1l << i) - 1,
+ hist->data[i - 1], MAX_STARS, starstr);
+ }
+ }
+}
+
+static void get_data(int fd)
+{
+ long key, value;
+ int c, i;
+
+ for (i = 0; i < MAX_CPU; i++)
+ cpu_hist[i].max = 0;
+
+ for (c = 0; c < MAX_CPU; c++) {
+ for (i = 0; i < MAX_ENTRIES; i++) {
+ key = c * MAX_ENTRIES + i;
+ bpf_map_lookup_elem(fd, &key, &value);
+
+ cpu_hist[c].data[i] = value;
+ if (value > cpu_hist[c].max)
+ cpu_hist[c].max = value;
+ }
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int map_fd, i = 0;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_lat");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
+ }
+
+ while (1) {
+ get_data(map_fd);
+ print_hist();
+ sleep(5);
+ }
+
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
+ return 0;
+}
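
print_hist() labels bucket i with (1l << i) >> 1 through (1l << i) - 1, i.e. the nanosecond range whose floor(log2) is i - 1, matching the slot the kernel side picked via log2l(). A quick check of the labels:

#include <stdio.h>

int main(void)
{
	for (int i = 1; i <= 5; i++)
		printf("bucket %d: %8ld -> %-8ld ns\n", i,
		       (1l << i) >> 1, (1l << i) - 1);
	return 0;
}
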
diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
new file mode 100755
index 000000000..0eda9754f
--- /dev/null
+++ b/samples/bpf/lwt_len_hist.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NS1=lwt_ns1
+VETH0=tst_lwt1a
+VETH1=tst_lwt1b
+
+TRACE_ROOT=/sys/kernel/debug/tracing
+
+function cleanup {
+ # To reset saved histogram, remove pinned map
+ rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
+ ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
+ ip link del $VETH0 2> /dev/null
+ ip link del $VETH1 2> /dev/null
+ ip netns exec $NS1 killall netserver
+ ip netns delete $NS1 2> /dev/null
+}
+
+cleanup
+
+ip netns add $NS1
+ip link add $VETH0 type veth peer name $VETH1
+ip link set dev $VETH0 up
+ip addr add 192.168.253.1/24 dev $VETH0
+ip link set $VETH1 netns $NS1
+ip netns exec $NS1 ip link set dev $VETH1 up
+ip netns exec $NS1 ip addr add 192.168.253.2/24 dev $VETH1
+ip netns exec $NS1 netserver
+
+echo 1 > ${TRACE_ROOT}/tracing_on
+cp /dev/null ${TRACE_ROOT}/trace
+ip route add 192.168.253.2/32 encap bpf out obj lwt_len_hist_kern.o section len_hist dev $VETH0
+netperf -H 192.168.253.2 -t TCP_STREAM
+cat ${TRACE_ROOT}/trace | grep -v '^#'
+./lwt_len_hist
+cleanup
+echo 0 > ${TRACE_ROOT}/tracing_on
+
+exit 0
diff --git a/samples/bpf/lwt_len_hist_kern.c b/samples/bpf/lwt_len_hist_kern.c
new file mode 100644
index 000000000..1fa14c549
--- /dev/null
+++ b/samples/bpf/lwt_len_hist_kern.c
@@ -0,0 +1,75 @@
+/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/in.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_elf_map {
+ __u32 type;
+ __u32 size_key;
+ __u32 size_value;
+ __u32 max_elem;
+ __u32 flags;
+ __u32 id;
+ __u32 pinning;
+};
+
+struct bpf_elf_map SEC("maps") lwt_len_hist_map = {
+ .type = BPF_MAP_TYPE_PERCPU_HASH,
+ .size_key = sizeof(__u64),
+ .size_value = sizeof(__u64),
+ .pinning = 2,
+ .max_elem = 1024,
+};
+
+static unsigned int log2(unsigned int v)
+{
+ unsigned int r;
+ unsigned int shift;
+
+ r = (v > 0xFFFF) << 4; v >>= r;
+ shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+ shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+ shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+ r |= (v >> 1);
+ return r;
+}
+
+static unsigned int log2l(unsigned long v)
+{
+ unsigned int hi = v >> 32;
+ if (hi)
+ return log2(hi) + 32;
+ else
+ return log2(v);
+}
+
+SEC("len_hist")
+int do_len_hist(struct __sk_buff *skb)
+{
+ __u64 *value, key, init_val = 1;
+
+ key = log2l(skb->len);
+
+ value = bpf_map_lookup_elem(&lwt_len_hist_map, &key);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+ else
+ bpf_map_update_elem(&lwt_len_hist_map, &key, &init_val, BPF_ANY);
+
+ return BPF_OK;
+}
+
+char _license[] SEC("license") = "GPL";
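
The map is declared with iproute2's struct bpf_elf_map rather than the BTF-style .maps sections used elsewhere in this series, because "ip route ... encap bpf obj" loads the object with iproute2's own loader. pinning = 2 corresponds to iproute2's PIN_GLOBAL_NS (per its bpf_elf.h: 0 = no pinning, 1 = per-object, 2 = global), which pins the map under /sys/fs/bpf/tc/globals/ so a separate process can reopen it later:

#include <bpf/bpf.h>

int open_pinned_hist_map(void)
{
	/* PIN_GLOBAL_NS (pinning = 2) lands the map here */
	return bpf_obj_get("/sys/fs/bpf/tc/globals/lwt_len_hist_map");
}
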
diff --git a/samples/bpf/lwt_len_hist_user.c b/samples/bpf/lwt_len_hist_user.c
new file mode 100644
index 000000000..430a4b7e3
--- /dev/null
+++ b/samples/bpf/lwt_len_hist_user.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <arpa/inet.h>
+
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+
+#define MAX_INDEX 64
+#define MAX_STARS 38
+
+static void stars(char *str, long val, long max, int width)
+{
+ int i;
+
+ for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
+ str[i] = '*';
+ if (val > max)
+ str[i - 1] = '+';
+ str[i] = '\0';
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ const char *map_filename = "/sys/fs/bpf/tc/globals/lwt_len_hist_map";
+ uint64_t values[nr_cpus], sum, max_value = 0, data[MAX_INDEX] = {};
+ uint64_t key = 0, next_key, max_key = 0;
+ char starstr[MAX_STARS];
+ int i, map_fd;
+
+ map_fd = bpf_obj_get(map_filename);
+ if (map_fd < 0) {
+ fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
+ map_filename, strerror(errno), errno);
+ return -1;
+ }
+
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
+ if (next_key >= MAX_INDEX) {
+ fprintf(stderr, "Key %lu out of bounds\n", next_key);
+ key = next_key; /* advance, or the loop never terminates */
+ continue;
+ }
+
+ bpf_map_lookup_elem(map_fd, &next_key, values);
+
+ sum = 0;
+ for (i = 0; i < nr_cpus; i++)
+ sum += values[i];
+
+ data[next_key] = sum;
+ if (sum && next_key > max_key)
+ max_key = next_key;
+
+ if (sum > max_value)
+ max_value = sum;
+
+ key = next_key;
+ }
+
+ for (i = 1; i <= max_key + 1; i++) {
+ stars(starstr, data[i - 1], max_value, MAX_STARS);
+ printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
+ (1l << i) >> 1, (1l << i) - 1, data[i - 1],
+ MAX_STARS, starstr);
+ }
+
+ close(map_fd);
+
+ return 0;
+}
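
Because lwt_len_hist_map is a BPF_MAP_TYPE_PERCPU_HASH, each lookup returns one value slot per possible CPU and the caller sums them, as main() does above. The same pattern factored into a helper (a sketch; bpf_num_possible_cpus() comes from the bpf_util.h this file already includes):

#include <bpf/bpf.h>
#include "bpf_util.h"

static __u64 percpu_sum(int map_fd, __u64 key)
{
	unsigned int ncpus = bpf_num_possible_cpus();
	__u64 vals[ncpus], total = 0;

	if (bpf_map_lookup_elem(map_fd, &key, vals))
		return 0;	/* key absent */
	for (unsigned int c = 0; c < ncpus; c++)
		total += vals[c];
	return total;
}
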
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
new file mode 100644
index 000000000..7342c5b2f
--- /dev/null
+++ b/samples/bpf/map_perf_test_kern.c
@@ -0,0 +1,303 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
+
+#define MAX_ENTRIES 1000
+#define MAX_NR_CPUS 1024
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, MAX_ENTRIES);
+} hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 10000);
+} lru_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 10000);
+ __uint(map_flags, BPF_F_NO_COMMON_LRU);
+} nocommon_lru_hash_map SEC(".maps");
+
+struct inner_lru {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, MAX_ENTRIES);
+ __uint(map_flags, BPF_F_NUMA_NODE);
+ __uint(numa_node, 0);
+} inner_lru_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, MAX_NR_CPUS);
+ __uint(key_size, sizeof(u32));
+ __array(values, struct inner_lru); /* use inner_lru as inner map */
+} array_of_lru_hashs SEC(".maps") = {
+ /* statically initialize the first element */
+ .values = { &inner_lru_hash_map },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, MAX_ENTRIES);
+} percpu_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, MAX_ENTRIES);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_map_alloc SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, MAX_ENTRIES);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} percpu_hash_map_alloc SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(key_size, 8);
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, 10000);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_trie_map_alloc SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, MAX_ENTRIES);
+} array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, MAX_ENTRIES);
+} lru_hash_lookup_map SEC(".maps");
+
+SEC("kprobe/" SYSCALL(sys_getuid))
+int stress_hmap(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_current_pid_tgid();
+ long init_val = 1;
+ long *value;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
+ value = bpf_map_lookup_elem(&hash_map, &key);
+ if (value)
+ bpf_map_delete_elem(&hash_map, &key);
+ }
+
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_geteuid))
+int stress_percpu_hmap(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_current_pid_tgid();
+ long init_val = 1;
+ long *value;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
+ value = bpf_map_lookup_elem(&percpu_hash_map, &key);
+ if (value)
+ bpf_map_delete_elem(&percpu_hash_map, &key);
+ }
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_getgid))
+int stress_hmap_alloc(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_current_pid_tgid();
+ long init_val = 1;
+ long *value;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
+ value = bpf_map_lookup_elem(&hash_map_alloc, &key);
+ if (value)
+ bpf_map_delete_elem(&hash_map_alloc, &key);
+ }
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_getegid))
+int stress_percpu_hmap_alloc(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_current_pid_tgid();
+ long init_val = 1;
+ long *value;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
+ value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
+ if (value)
+ bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
+ }
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_connect))
+int stress_lru_hmap_alloc(struct pt_regs *ctx)
+{
+ struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
+ char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
+ union {
+ u16 dst6[8];
+ struct {
+ u16 magic0;
+ u16 magic1;
+ u16 tcase;
+ u16 unused16;
+ u32 unused32;
+ u32 key;
+ };
+ } test_params;
+ struct sockaddr_in6 *in6;
+ u16 test_case;
+ int addrlen, ret;
+ long val = 1;
+ u32 key = 0;
+
+ in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+ addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
+
+ if (addrlen != sizeof(*in6))
+ return 0;
+
+ ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
+ &in6->sin6_addr);
+ if (ret)
+ goto done;
+
+ if (test_params.magic0 != 0xdead ||
+ test_params.magic1 != 0xbeef)
+ return 0;
+
+ test_case = test_params.tcase;
+ if (test_case != 3)
+ key = bpf_get_prandom_u32();
+
+ if (test_case == 0) {
+ ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
+ } else if (test_case == 1) {
+ ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
+ BPF_ANY);
+ } else if (test_case == 2) {
+ void *nolocal_lru_map;
+ int cpu = bpf_get_smp_processor_id();
+
+ nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
+ &cpu);
+ if (!nolocal_lru_map) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
+ BPF_ANY);
+ } else if (test_case == 3) {
+ u32 i;
+
+ key = test_params.key;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < 32; i++) {
+ bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
+ key++;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+done:
+ if (ret)
+ bpf_trace_printk(fmt, sizeof(fmt), ret);
+
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_gettid))
+int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
+{
+ union {
+ u32 b32[2];
+ u8 b8[8];
+ } key;
+ unsigned int i;
+
+ key.b32[0] = 32;
+ key.b8[4] = 192;
+ key.b8[5] = 168;
+ key.b8[6] = 0;
+ key.b8[7] = 1;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < 32; ++i)
+ bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);
+
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_getpgid))
+int stress_hash_map_lookup(struct pt_regs *ctx)
+{
+ u32 key = 1, i;
+ long *value;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < 64; ++i)
+ value = bpf_map_lookup_elem(&hash_map, &key);
+
+ return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_getppid))
+int stress_array_map_lookup(struct pt_regs *ctx)
+{
+ u32 key = 1, i;
+ long *value;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < 64; ++i)
+ value = bpf_map_lookup_elem(&array_map, &key);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
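
Each stress_*() program hooks a different, otherwise-cheap syscall so that user space can trigger one specific map test just by calling it. SYSCALL() (from trace_common.h) prepends the architecture's syscall symbol prefix; on x86_64 the sections above expand to names like "kprobe/__x64_sys_getuid". A sketch of building such a probe name in user space (the prefix is an x86_64-specific assumption):

#include <stdio.h>

int main(void)
{
	const char *prefix = "__x64_";	/* arch-dependent */
	char probe[64];

	snprintf(probe, sizeof(probe), "kprobe/%ssys_getuid", prefix);
	printf("%s\n", probe);	/* kprobe/__x64_sys_getuid */
	return 0;
}
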
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
new file mode 100644
index 000000000..1bb53f4b2
--- /dev/null
+++ b/samples/bpf/map_perf_test_user.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <asm/unistd.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define TEST_BIT(t) (1U << (t))
+#define MAX_NR_CPUS 1024
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+enum test_type {
+ HASH_PREALLOC,
+ PERCPU_HASH_PREALLOC,
+ HASH_KMALLOC,
+ PERCPU_HASH_KMALLOC,
+ LRU_HASH_PREALLOC,
+ NOCOMMON_LRU_HASH_PREALLOC,
+ LPM_KMALLOC,
+ HASH_LOOKUP,
+ ARRAY_LOOKUP,
+ INNER_LRU_HASH_PREALLOC,
+ LRU_HASH_LOOKUP,
+ NR_TESTS,
+};
+
+const char *test_map_names[NR_TESTS] = {
+ [HASH_PREALLOC] = "hash_map",
+ [PERCPU_HASH_PREALLOC] = "percpu_hash_map",
+ [HASH_KMALLOC] = "hash_map_alloc",
+ [PERCPU_HASH_KMALLOC] = "percpu_hash_map_alloc",
+ [LRU_HASH_PREALLOC] = "lru_hash_map",
+ [NOCOMMON_LRU_HASH_PREALLOC] = "nocommon_lru_hash_map",
+ [LPM_KMALLOC] = "lpm_trie_map_alloc",
+ [HASH_LOOKUP] = "hash_map",
+ [ARRAY_LOOKUP] = "array_map",
+ [INNER_LRU_HASH_PREALLOC] = "inner_lru_hash_map",
+ [LRU_HASH_LOOKUP] = "lru_hash_lookup_map",
+};
+
+enum map_idx {
+ array_of_lru_hashs_idx,
+ hash_map_alloc_idx,
+ lru_hash_lookup_idx,
+ NR_IDXES,
+};
+
+static int map_fd[NR_IDXES];
+
+static int test_flags = ~0;
+static uint32_t num_map_entries;
+static uint32_t inner_lru_hash_size;
+static int lru_hash_lookup_test_entries = 32;
+static uint32_t max_cnt = 10000;
+
+static int check_test_flags(enum test_type t)
+{
+ return test_flags & TEST_BIT(t);
+}
+
+static void test_hash_prealloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_getuid);
+ printf("%d:hash_map_perf pre-alloc %lld events per sec\n",
+ cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static int pre_test_lru_hash_lookup(int tasks)
+{
+ int fd = map_fd[lru_hash_lookup_idx];
+ uint32_t key;
+ long val = 1;
+ int ret;
+
+ if (num_map_entries > lru_hash_lookup_test_entries)
+ lru_hash_lookup_test_entries = num_map_entries;
+
+ /* Populate the lru_hash_map for LRU_HASH_LOOKUP perf test.
+ *
+ * It is fine that the user requests for a map with
+ * num_map_entries < 32 and some of the later lru hash lookup
+ * may return not found. For LRU map, we are not interested
+ * in such small map performance.
+ */
+ for (key = 0; key < lru_hash_lookup_test_entries; key++) {
+ ret = bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void do_test_lru(enum test_type test, int cpu)
+{
+ static int inner_lru_map_fds[MAX_NR_CPUS];
+
+ struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
+ const char *test_name;
+ __u64 start_time;
+ int i, ret;
+
+ if (test == INNER_LRU_HASH_PREALLOC && cpu) {
+ /* If CPU is not 0, create inner_lru hash map and insert the fd
+ * value into the array_of_lru_hash map. In case of CPU 0,
+ * 'inner_lru_hash_map' was statically inserted on the map init
+ */
+ int outer_fd = map_fd[array_of_lru_hashs_idx];
+ unsigned int mycpu, mynode;
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .map_flags = BPF_F_NUMA_NODE,
+ );
+
+ assert(cpu < MAX_NR_CPUS);
+
+ ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
+ assert(!ret);
+
+ opts.numa_node = mynode;
+ inner_lru_map_fds[cpu] =
+ bpf_map_create(BPF_MAP_TYPE_LRU_HASH,
+ test_map_names[INNER_LRU_HASH_PREALLOC],
+ sizeof(uint32_t),
+ sizeof(long),
+ inner_lru_hash_size, &opts);
+ if (inner_lru_map_fds[cpu] < 0) {
+ printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
+ strerror(errno), errno);
+ exit(1);
+ }
+
+ ret = bpf_map_update_elem(outer_fd, &cpu,
+ &inner_lru_map_fds[cpu],
+ BPF_ANY);
+ if (ret) {
+ printf("cannot update ARRAY_OF_LRU_HASHS with key:%u. %s(%d)\n",
+ cpu, strerror(errno), errno);
+ exit(1);
+ }
+ }
+
+ in6.sin6_addr.s6_addr16[0] = 0xdead;
+ in6.sin6_addr.s6_addr16[1] = 0xbeef;
+
+ if (test == LRU_HASH_PREALLOC) {
+ test_name = "lru_hash_map_perf";
+ in6.sin6_addr.s6_addr16[2] = 0;
+ } else if (test == NOCOMMON_LRU_HASH_PREALLOC) {
+ test_name = "nocommon_lru_hash_map_perf";
+ in6.sin6_addr.s6_addr16[2] = 1;
+ } else if (test == INNER_LRU_HASH_PREALLOC) {
+ test_name = "inner_lru_hash_map_perf";
+ in6.sin6_addr.s6_addr16[2] = 2;
+ } else if (test == LRU_HASH_LOOKUP) {
+ test_name = "lru_hash_lookup_perf";
+ in6.sin6_addr.s6_addr16[2] = 3;
+ in6.sin6_addr.s6_addr32[3] = 0;
+ } else {
+ assert(0);
+ }
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++) {
+ ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6));
+ assert(ret == -1 && errno == EBADF);
+ if (in6.sin6_addr.s6_addr32[3] <
+ lru_hash_lookup_test_entries - 32)
+ in6.sin6_addr.s6_addr32[3] += 32;
+ else
+ in6.sin6_addr.s6_addr32[3] = 0;
+ }
+ printf("%d:%s pre-alloc %lld events per sec\n",
+ cpu, test_name,
+ max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_lru_hash_prealloc(int cpu)
+{
+ do_test_lru(LRU_HASH_PREALLOC, cpu);
+}
+
+static void test_nocommon_lru_hash_prealloc(int cpu)
+{
+ do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu);
+}
+
+static void test_inner_lru_hash_prealloc(int cpu)
+{
+ do_test_lru(INNER_LRU_HASH_PREALLOC, cpu);
+}
+
+static void test_lru_hash_lookup(int cpu)
+{
+ do_test_lru(LRU_HASH_LOOKUP, cpu);
+}
+
+static void test_percpu_hash_prealloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_geteuid);
+ printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n",
+ cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_hash_kmalloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_getgid);
+ printf("%d:hash_map_perf kmalloc %lld events per sec\n",
+ cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_percpu_hash_kmalloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_getegid);
+ printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n",
+ cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_lpm_kmalloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_gettid);
+ printf("%d:lpm_perf kmalloc %lld events per sec\n",
+ cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_hash_lookup(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_getpgid, 0);
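+ /* the attached kprobe program performs 64 map lookups per
+ * syscall, hence the factor of 64 below
+ */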
+ printf("%d:hash_lookup %lld lookups per sec\n",
+ cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
+}
+
+static void test_array_lookup(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < max_cnt; i++)
+ syscall(__NR_getppid, 0);
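+ /* as with hash_lookup, the kprobe program does 64 array lookups
+ * per syscall
+ */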
+ printf("%d:array_lookup %lld lookups per sec\n",
+ cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
+}
+
+typedef int (*pre_test_func)(int tasks);
+const pre_test_func pre_test_funcs[] = {
+ [LRU_HASH_LOOKUP] = pre_test_lru_hash_lookup,
+};
+
+typedef void (*test_func)(int cpu);
+const test_func test_funcs[] = {
+ [HASH_PREALLOC] = test_hash_prealloc,
+ [PERCPU_HASH_PREALLOC] = test_percpu_hash_prealloc,
+ [HASH_KMALLOC] = test_hash_kmalloc,
+ [PERCPU_HASH_KMALLOC] = test_percpu_hash_kmalloc,
+ [LRU_HASH_PREALLOC] = test_lru_hash_prealloc,
+ [NOCOMMON_LRU_HASH_PREALLOC] = test_nocommon_lru_hash_prealloc,
+ [LPM_KMALLOC] = test_lpm_kmalloc,
+ [HASH_LOOKUP] = test_hash_lookup,
+ [ARRAY_LOOKUP] = test_array_lookup,
+ [INNER_LRU_HASH_PREALLOC] = test_inner_lru_hash_prealloc,
+ [LRU_HASH_LOOKUP] = test_lru_hash_lookup,
+};
+
+static int pre_test(int tasks)
+{
+ int i;
+
+ for (i = 0; i < NR_TESTS; i++) {
+ if (pre_test_funcs[i] && check_test_flags(i)) {
+ int ret = pre_test_funcs[i](tasks);
+
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void loop(int cpu)
+{
+ cpu_set_t cpuset;
+ int i;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ sched_setaffinity(0, sizeof(cpuset), &cpuset);
+
+ for (i = 0; i < NR_TESTS; i++) {
+ if (check_test_flags(i))
+ test_funcs[i](cpu);
+ }
+}
+
+static void run_perf_test(int tasks)
+{
+ pid_t pid[tasks];
+ int i;
+
+ assert(!pre_test(tasks));
+
+ for (i = 0; i < tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ loop(i);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+}
+
+static void fill_lpm_trie(void)
+{
+ struct bpf_lpm_trie_key *key;
+ unsigned long value = 0;
+ unsigned int i;
+ int r;
+
+ key = alloca(sizeof(*key) + 4);
+ key->prefixlen = 32;
+
+ for (i = 0; i < 512; ++i) {
+ key->prefixlen = rand() % 33;
+ key->data[0] = rand() & 0xff;
+ key->data[1] = rand() & 0xff;
+ key->data[2] = rand() & 0xff;
+ key->data[3] = rand() & 0xff;
+ r = bpf_map_update_elem(map_fd[hash_map_alloc_idx],
+ key, &value, 0);
+ assert(!r);
+ }
+
+ key->prefixlen = 32;
+ key->data[0] = 192;
+ key->data[1] = 168;
+ key->data[2] = 0;
+ key->data[3] = 1;
+ value = 128;
+
+ r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0);
+ assert(!r);
+}
+
+static void fixup_map(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ int i;
+
+ bpf_object__for_each_map(map, obj) {
+ const char *name = bpf_map__name(map);
+
+ /* Only change the max_entries for the enabled test(s) */
+ for (i = 0; i < NR_TESTS; i++) {
+ if (!strcmp(test_map_names[i], name) &&
+ (check_test_flags(i))) {
+ bpf_map__set_max_entries(map, num_map_entries);
+ continue;
+ }
+ }
+ }
+
+ inner_lru_hash_size = num_map_entries;
+}
+
+int main(int argc, char **argv)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ struct bpf_link *links[8];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ char filename[256];
+ int i = 0;
+
+ if (argc > 1)
+ test_flags = atoi(argv[1]) ? : test_flags;
+
+ if (argc > 2)
+ nr_cpus = atoi(argv[2]) ? : nr_cpus;
+
+ if (argc > 3)
+ num_map_entries = atoi(argv[3]);
+
+ if (argc > 4)
+ max_cnt = atoi(argv[4]);
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map");
+ if (libbpf_get_error(map)) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ inner_lru_hash_size = bpf_map__max_entries(map);
+ if (!inner_lru_hash_size) {
+ fprintf(stderr, "ERROR: failed to get map attribute\n");
+ goto cleanup;
+ }
+
+ /* resize BPF map prior to loading */
+ if (num_map_entries > 0)
+ fixup_map(obj);
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc");
+ map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map");
+ if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
+ }
+
+ fill_lpm_trie();
+
+ run_perf_test(nr_cpus);
+
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c
new file mode 100644
index 000000000..eb4d94742
--- /dev/null
+++ b/samples/bpf/offwaketime_kern.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/ptrace.h>
+#include <uapi/linux/perf_event.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
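+/* _(P) safely reads a field from a kernel struct: it copies P into a local
+ * via bpf_probe_read_kernel(), which kprobe programs must use instead of
+ * directly dereferencing kernel pointers.
+ */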
+#define _(P) \
+ ({ \
+ typeof(P) val; \
+ bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
+ val; \
+ })
+
+#define MINBLOCK_US 1
+#define MAX_ENTRIES 10000
+
+struct key_t {
+ char waker[TASK_COMM_LEN];
+ char target[TASK_COMM_LEN];
+ u32 wret;
+ u32 tret;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct key_t);
+ __type(value, u64);
+ __uint(max_entries, MAX_ENTRIES);
+} counts SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_ENTRIES);
+} start SEC(".maps");
+
+struct wokeby_t {
+ char name[TASK_COMM_LEN];
+ u32 ret;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, struct wokeby_t);
+ __uint(max_entries, MAX_ENTRIES);
+} wokeby SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stackmap SEC(".maps");
+
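+/* BPF_F_FAST_STACK_CMP compares stacks by hash only; a bucket collision
+ * makes bpf_get_stackid() return -EEXIST, which offwaketime_user.c reports.
+ */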
+#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
+
+SEC("kprobe/try_to_wake_up")
+int waker(struct pt_regs *ctx)
+{
+ struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
+ struct wokeby_t woke;
+ u32 pid;
+
+ pid = _(p->pid);
+
+ bpf_get_current_comm(&woke.name, sizeof(woke.name));
+ woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
+
+ bpf_map_update_elem(&wokeby, &pid, &woke, BPF_ANY);
+ return 0;
+}
+
+static inline int update_counts(void *ctx, u32 pid, u64 delta)
+{
+ struct wokeby_t *woke;
+ u64 zero = 0, *val;
+ struct key_t key;
+
+ __builtin_memset(&key.waker, 0, sizeof(key.waker));
+ bpf_get_current_comm(&key.target, sizeof(key.target));
+ key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
+ key.wret = 0;
+
+ woke = bpf_map_lookup_elem(&wokeby, &pid);
+ if (woke) {
+ key.wret = woke->ret;
+ __builtin_memcpy(&key.waker, woke->name, sizeof(key.waker));
+ bpf_map_delete_elem(&wokeby, &pid);
+ }
+
+ val = bpf_map_lookup_elem(&counts, &key);
+ if (!val) {
+ bpf_map_update_elem(&counts, &key, &zero, BPF_NOEXIST);
+ val = bpf_map_lookup_elem(&counts, &key);
+ if (!val)
+ return 0;
+ }
+ (*val) += delta;
+ return 0;
+}
+
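+/* Two alternative hooks for the context-switch side: the sched_switch
+ * tracepoint (default) or a kprobe on finish_task_switch (flip the #if).
+ * Both recover the pid of the task being switched out.
+ */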
+#if 1
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+ unsigned long long pad;
+ char prev_comm[TASK_COMM_LEN];
+ int prev_pid;
+ int prev_prio;
+ long long prev_state;
+ char next_comm[TASK_COMM_LEN];
+ int next_pid;
+ int next_prio;
+};
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+ /* record previous thread sleep time */
+ u32 pid = ctx->prev_pid;
+#else
+SEC("kprobe/finish_task_switch")
+int oncpu(struct pt_regs *ctx)
+{
+ struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
+ /* record previous thread sleep time */
+ u32 pid = _(p->pid);
+#endif
+ u64 delta, ts, *tsp;
+
+ ts = bpf_ktime_get_ns();
+ bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
+
+ /* calculate current thread's delta time */
+ pid = bpf_get_current_pid_tgid();
+ tsp = bpf_map_lookup_elem(&start, &pid);
+ if (!tsp)
+ /* missed start or filtered */
+ return 0;
+
+ delta = bpf_ktime_get_ns() - *tsp;
+ bpf_map_delete_elem(&start, &pid);
+ delta = delta / 1000;
+ if (delta < MINBLOCK_US)
+ return 0;
+
+ return update_counts(ctx, pid, delta);
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
new file mode 100644
index 000000000..b6eedcb98
--- /dev/null
+++ b/samples/bpf/offwaketime_user.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/perf_event.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include "trace_helpers.h"
+
+#define PRINT_RAW_ADDR 0
+
+/* counts, stackmap */
+static int map_fd[2];
+
+static void print_ksym(__u64 addr)
+{
+ struct ksym *sym;
+
+ if (!addr)
+ return;
+ sym = ksym_search(addr);
+ if (!sym) {
+ printf("ksym not found. Is kallsyms loaded?\n");
+ return;
+ }
+
+ if (PRINT_RAW_ADDR)
+ printf("%s/%llx;", sym->name, addr);
+ else
+ printf("%s;", sym->name);
+}
+
+#define TASK_COMM_LEN 16
+
+struct key_t {
+ char waker[TASK_COMM_LEN];
+ char target[TASK_COMM_LEN];
+ __u32 wret;
+ __u32 tret;
+};
+
+static void print_stack(struct key_t *key, __u64 count)
+{
+ __u64 ip[PERF_MAX_STACK_DEPTH] = {};
+ static bool warned;
+ int i;
+
+ printf("%s;", key->target);
+ if (bpf_map_lookup_elem(map_fd[1], &key->tret, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_ksym(ip[i]);
+ }
+ printf("-;");
+ if (bpf_map_lookup_elem(map_fd[1], &key->wret, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
+ print_ksym(ip[i]);
+ }
+ printf(";%s %lld\n", key->waker, count);
+
+ if ((key->tret == -EEXIST || key->wret == -EEXIST) && !warned) {
+ printf("stackmap collisions seen. Consider increasing size\n");
+ warned = true;
+ } else if (((int)(key->tret) < 0 || (int)(key->wret) < 0)) {
+ printf("err stackid %d %d\n", key->tret, key->wret);
+ }
+}
+
+static void print_stacks(int fd)
+{
+ struct key_t key = {}, next_key;
+ __u64 value;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(fd, &next_key, &value);
+ print_stack(&next_key, value);
+ key = next_key;
+ }
+}
+
+static void int_exit(int sig)
+{
+ print_stacks(map_fd[0]);
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_object *obj = NULL;
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ int delay = 1, i = 0;
+ char filename[256];
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ return 2;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
+ }
+
+ if (argc > 1)
+ delay = atoi(argv[1]);
+ sleep(delay);
+ print_stacks(map_fd[0]);
+
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
new file mode 100644
index 000000000..c6f65f90a
--- /dev/null
+++ b/samples/bpf/parse_ldabs.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+ return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+ & (IP_MF | IP_OFFSET);
+}
+
+SEC("ldabs")
+int handle_ingress(struct __sk_buff *skb)
+{
+ __u64 troff = ETH_HLEN + sizeof(struct iphdr);
+
+ if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP)
+ return 0;
+ if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP ||
+ load_byte(skb, ETH_HLEN) != 0x45)
+ return 0;
+ if (ip_is_fragment(skb, ETH_HLEN))
+ return 0;
+ if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT)
+ return TC_ACT_SHOT;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
new file mode 100644
index 000000000..4a486cb1e
--- /dev/null
+++ b/samples/bpf/parse_simple.c
@@ -0,0 +1,49 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include <bpf/bpf_helpers.h>
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_proto;
+};
+
+SEC("simple")
+int handle_ingress(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ struct iphdr *iph = data + sizeof(*eth);
+ struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+
+ /* single length check */
+ if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end)
+ return 0;
+
+ if (eth->h_proto != htons(ETH_P_IP))
+ return 0;
+ if (iph->protocol != IPPROTO_UDP || iph->ihl != 5)
+ return 0;
+ if (ip_is_fragment(iph))
+ return 0;
+ if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT))
+ return TC_ACT_SHOT;
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
new file mode 100644
index 000000000..d8623846e
--- /dev/null
+++ b/samples/bpf/parse_varlen.c
@@ -0,0 +1,150 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include <bpf/bpf_helpers.h>
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+#define DEBUG 0
+
+static int tcp(void *data, uint64_t tp_off, void *data_end)
+{
+ struct tcphdr *tcp = data + tp_off;
+
+ if (tcp + 1 > data_end)
+ return 0;
+ if (tcp->dest == htons(80) || tcp->source == htons(80))
+ return TC_ACT_SHOT;
+ return 0;
+}
+
+static int udp(void *data, uint64_t tp_off, void *data_end)
+{
+ struct udphdr *udp = data + tp_off;
+
+ if (udp + 1 > data_end)
+ return 0;
+ if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT) ||
+ udp->source == htons(DEFAULT_PKTGEN_UDP_PORT)) {
+ if (DEBUG) {
+ char fmt[] = "udp port 9 indeed\n";
+
+ bpf_trace_printk(fmt, sizeof(fmt));
+ }
+ return TC_ACT_SHOT;
+ }
+ return 0;
+}
+
+static int parse_ipv4(void *data, uint64_t nh_off, void *data_end)
+{
+ struct iphdr *iph;
+ uint64_t ihl_len;
+
+ iph = data + nh_off;
+ if (iph + 1 > data_end)
+ return 0;
+
+ if (ip_is_fragment(iph))
+ return 0;
+ ihl_len = iph->ihl * 4;
+
+ if (iph->protocol == IPPROTO_IPIP) {
+ iph = data + nh_off + ihl_len;
+ if (iph + 1 > data_end)
+ return 0;
+ ihl_len += iph->ihl * 4;
+ }
+
+ if (iph->protocol == IPPROTO_TCP)
+ return tcp(data, nh_off + ihl_len, data_end);
+ else if (iph->protocol == IPPROTO_UDP)
+ return udp(data, nh_off + ihl_len, data_end);
+ return 0;
+}
+
+static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
+{
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph;
+ uint64_t ihl_len = sizeof(struct ipv6hdr);
+ uint64_t nexthdr;
+
+ ip6h = data + nh_off;
+ if (ip6h + 1 > data_end)
+ return 0;
+
+ nexthdr = ip6h->nexthdr;
+
+ if (nexthdr == IPPROTO_IPIP) {
+ iph = data + nh_off + ihl_len;
+ if (iph + 1 > data_end)
+ return 0;
+ ihl_len += iph->ihl * 4;
+ nexthdr = iph->protocol;
+ } else if (nexthdr == IPPROTO_IPV6) {
+ ip6h = data + nh_off + ihl_len;
+ if (ip6h + 1 > data_end)
+ return 0;
+ ihl_len += sizeof(struct ipv6hdr);
+ nexthdr = ip6h->nexthdr;
+ }
+
+ if (nexthdr == IPPROTO_TCP)
+ return tcp(data, nh_off + ihl_len, data_end);
+ else if (nexthdr == IPPROTO_UDP)
+ return udp(data, nh_off + ihl_len, data_end);
+ return 0;
+}
+
+SEC("varlen")
+int handle_ingress(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ uint64_t h_proto, nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return 0;
+
+ h_proto = eth->h_proto;
+
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return 0;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return 0;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+ if (h_proto == htons(ETH_P_IP))
+ return parse_ipv4(data, nh_off, data_end);
+ else if (h_proto == htons(ETH_P_IPV6))
+ return parse_ipv6(data, nh_off, data_end);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/run_cookie_uid_helper_example.sh b/samples/bpf/run_cookie_uid_helper_example.sh
new file mode 100755
index 000000000..fc6bc0451
--- /dev/null
+++ b/samples/bpf/run_cookie_uid_helper_example.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+local_dir="$(pwd)"
+root_dir=$local_dir/../..
+mnt_dir=$(mktemp -d --tmp)
+
+on_exit() {
+ iptables -D OUTPUT -m bpf --object-pinned ${mnt_dir}/bpf_prog -j ACCEPT
+ umount ${mnt_dir}
+ rm -r ${mnt_dir}
+}
+
+trap on_exit EXIT
+mount -t bpf bpf ${mnt_dir}
+./per_socket_stats_example ${mnt_dir}/bpf_prog $1
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
new file mode 100644
index 000000000..a3f8a3998
--- /dev/null
+++ b/samples/bpf/sampleip_kern.c
@@ -0,0 +1,38 @@
+/* Copyright 2016 Netflix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define MAX_IPS 8192
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u64);
+ __type(value, u32);
+ __uint(max_entries, MAX_IPS);
+} ip_map SEC(".maps");
+
+SEC("perf_event")
+int do_sample(struct bpf_perf_event_data *ctx)
+{
+ u64 ip;
+ u32 *value, init_val = 1;
+
+ ip = PT_REGS_IP(&ctx->regs);
+ value = bpf_map_lookup_elem(&ip_map, &ip);
+ if (value)
+ *value += 1;
+ else
+ /* E2BIG is deliberately not handled in this example */
+ bpf_map_update_elem(&ip_map, &ip, &init_val, BPF_NOEXIST);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
new file mode 100644
index 000000000..921c505bb
--- /dev/null
+++ b/samples/bpf/sampleip_user.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sampleip: sample instruction pointer and frequency count in a BPF map.
+ *
+ * Copyright 2016 Netflix, Inc.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+#define DEFAULT_FREQ 99
+#define DEFAULT_SECS 5
+#define MAX_IPS 8192
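+/* classic x86_64 kernel direct-map base (pre-KASLR); sampled IPs above it
+ * are treated as kernel addresses
+ */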
+#define PAGE_OFFSET 0xffff880000000000
+
+static int map_fd;
+static int nr_cpus;
+
+static void usage(void)
+{
+ printf("USAGE: sampleip [-F freq] [duration]\n");
+ printf(" -F freq # sample frequency (Hertz), default 99\n");
+ printf(" duration # sampling duration (seconds), default 5\n");
+}
+
+static int sampling_start(int freq, struct bpf_program *prog,
+ struct bpf_link *links[])
+{
+ int i, pmu_fd;
+
+ struct perf_event_attr pe_sample_attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .freq = 1,
+ .sample_period = freq,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ .inherit = 1,
+ };
+
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
+ -1 /* group_fd */, 0 /* flags */);
+ if (pmu_fd < 0) {
+ fprintf(stderr, "ERROR: Initializing perf sampling\n");
+ return 1;
+ }
+ links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: Attach perf event\n");
+ links[i] = NULL;
+ close(pmu_fd);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void sampling_end(struct bpf_link *links[])
+{
+ int i;
+
+ for (i = 0; i < nr_cpus; i++)
+ bpf_link__destroy(links[i]);
+}
+
+struct ipcount {
+ __u64 ip;
+ __u32 count;
+};
+
+/* used for sorting */
+struct ipcount counts[MAX_IPS];
+
+static int count_cmp(const void *p1, const void *p2)
+{
+ return ((struct ipcount *)p1)->count - ((struct ipcount *)p2)->count;
+}
+
+static void print_ip_map(int fd)
+{
+ struct ksym *sym;
+ __u64 key, next_key;
+ __u32 value;
+ int i, max;
+
+ printf("%-19s %-32s %s\n", "ADDR", "KSYM", "COUNT");
+
+ /* fetch IPs and counts */
+ key = 0, i = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(fd, &next_key, &value);
+ counts[i].ip = next_key;
+ counts[i++].count = value;
+ key = next_key;
+ }
+ max = i;
+
+ /* sort and print */
+ qsort(counts, max, sizeof(struct ipcount), count_cmp);
+ for (i = 0; i < max; i++) {
+ if (counts[i].ip > PAGE_OFFSET) {
+ sym = ksym_search(counts[i].ip);
+ if (!sym) {
+ printf("ksym not found. Is kallsyms loaded?\n");
+ continue;
+ }
+
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, sym->name,
+ counts[i].count);
+ } else {
+ printf("0x%-17llx %-32s %u\n", counts[i].ip, "(user)",
+ counts[i].count);
+ }
+ }
+
+ if (max == MAX_IPS) {
+ printf("WARNING: IP hash was full (max %d entries); ", max);
+ printf("may have dropped samples\n");
+ }
+}
+
+static void int_exit(int sig)
+{
+ printf("\n");
+ print_ip_map(map_fd);
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ int opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS, error = 1;
+ struct bpf_object *obj = NULL;
+ struct bpf_program *prog;
+ struct bpf_link **links;
+ char filename[256];
+
+ /* process arguments */
+ while ((opt = getopt(argc, argv, "F:h")) != -1) {
+ switch (opt) {
+ case 'F':
+ freq = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage();
+ return 0;
+ }
+ }
+ if (argc - optind == 1)
+ secs = atoi(argv[optind]);
+ if (freq == 0 || secs == 0) {
+ usage();
+ return 1;
+ }
+
+ /* initialize kernel symbol translation */
+ if (load_kallsyms()) {
+ fprintf(stderr, "ERROR: loading /proc/kallsyms\n");
+ return 2;
+ }
+
+ /* create perf FDs for each CPU */
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ links = calloc(nr_cpus, sizeof(struct bpf_link *));
+ if (!links) {
+ fprintf(stderr, "ERROR: malloc of links\n");
+ goto cleanup;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "do_sample");
+ if (!prog) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "ip_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ /* do sampling */
+ printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
+ freq, secs);
+ if (sampling_start(freq, prog, links) != 0)
+ goto cleanup;
+
+ sleep(secs);
+ error = 0;
+
+cleanup:
+ sampling_end(links);
+ /* output sample counts */
+ if (!error)
+ print_ip_map(map_fd);
+
+ free(links);
+ bpf_object__close(obj);
+ return error;
+}
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
new file mode 100644
index 000000000..5b66f2401
--- /dev/null
+++ b/samples/bpf/sock_example.c
@@ -0,0 +1,111 @@
+/* eBPF example program:
+ * - creates arraymap in kernel with key 4 bytes and value 8 bytes
+ *
+ * - loads eBPF program:
+ * r0 = skb->data[ETH_HLEN + offsetof(struct iphdr, protocol)];
+ * *(u32*)(fp - 4) = r0;
+ * // assuming packet is IPv4, lookup ip->proto in a map
+ * value = bpf_map_lookup_elem(map_fd, fp - 4);
+ * if (value)
+ * (*(u64*)value) += 1;
+ *
+ * - attaches this program to loopback interface "lo" raw socket
+ *
+ * - every second user space reads map[tcp], map[udp], map[icmp] to see
+ * how many packets of given protocol were seen on "lo"
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <stddef.h>
+#include <bpf/bpf.h>
+#include "bpf_insn.h"
+#include "sock_example.h"
+#include "bpf_util.h"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int test_sock(void)
+{
+ int sock = -1, map_fd, prog_fd, i, key;
+ long long value = 0, tcp_cnt, udp_cnt, icmp_cnt;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value),
+ 256, NULL);
+ if (map_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ goto cleanup;
+ }
+
+ struct bpf_insn prog[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = ARRAY_SIZE(prog);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = bpf_log_buf,
+ .log_size = BPF_LOG_BUF_SIZE,
+ );
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ prog, insns_cnt, &opts);
+ if (prog_fd < 0) {
+ printf("failed to load prog '%s'\n", strerror(errno));
+ goto cleanup;
+ }
+
+ sock = open_raw_sock("lo");
+
+ if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) < 0) {
+ printf("setsockopt %s\n", strerror(errno));
+ goto cleanup;
+ }
+
+ for (i = 0; i < 10; i++) {
+ key = IPPROTO_TCP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
+
+ key = IPPROTO_UDP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
+
+ key = IPPROTO_ICMP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
+
+ printf("TCP %lld UDP %lld ICMP %lld packets\n",
+ tcp_cnt, udp_cnt, icmp_cnt);
+ sleep(1);
+ }
+
+cleanup:
+ /* maps, programs and raw sockets are cleaned up automatically on
+ * process exit
+ */
+ return 0;
+}
+
+int main(void)
+{
+ FILE *f;
+
+ f = popen("ping -4 -c5 localhost", "r");
+ (void)f;
+
+ return test_sock();
+}
diff --git a/samples/bpf/sock_example.h b/samples/bpf/sock_example.h
new file mode 100644
index 000000000..a27d7579b
--- /dev/null
+++ b/samples/bpf/sock_example.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdlib.h>
+#include <stdio.h>
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/if_ether.h>
+#include <net/if.h>
+#include <linux/if_packet.h>
+#include <arpa/inet.h>
+
+static inline int open_raw_sock(const char *name)
+{
+ struct sockaddr_ll sll;
+ int sock;
+
+ sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
+ if (sock < 0) {
+ printf("cannot create raw socket\n");
+ return -1;
+ }
+
+ memset(&sll, 0, sizeof(sll));
+ sll.sll_family = AF_PACKET;
+ sll.sll_ifindex = if_nametoindex(name);
+ sll.sll_protocol = htons(ETH_P_ALL);
+ if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
+ printf("bind to %s: %s\n", name, strerror(errno));
+ close(sock);
+ return -1;
+ }
+
+ return sock;
+}
diff --git a/samples/bpf/sock_flags_kern.c b/samples/bpf/sock_flags_kern.c
new file mode 100644
index 000000000..6d0ac7569
--- /dev/null
+++ b/samples/bpf/sock_flags_kern.c
@@ -0,0 +1,49 @@
+#include <uapi/linux/bpf.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/in6.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("cgroup/sock1")
+int bpf_prog1(struct bpf_sock *sk)
+{
+ char fmt[] = "socket: family %d type %d protocol %d\n";
+ char fmt2[] = "socket: uid %u gid %u\n";
+ __u64 gid_uid = bpf_get_current_uid_gid();
+ __u32 uid = gid_uid & 0xffffffff;
+ __u32 gid = gid_uid >> 32;
+
+ bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
+ bpf_trace_printk(fmt2, sizeof(fmt2), uid, gid);
+
+ /* block PF_INET6, SOCK_RAW, IPPROTO_ICMPV6 sockets
+ * i.e., make ping6 fail
+ */
+ if (sk->family == PF_INET6 &&
+ sk->type == SOCK_RAW &&
+ sk->protocol == IPPROTO_ICMPV6)
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sock2")
+int bpf_prog2(struct bpf_sock *sk)
+{
+ char fmt[] = "socket: family %d type %d protocol %d\n";
+
+ bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
+
+ /* block PF_INET, SOCK_RAW, IPPROTO_ICMP sockets
+ * i.e., make ping fail
+ */
+ if (sk->family == PF_INET &&
+ sk->type == SOCK_RAW &&
+ sk->protocol == IPPROTO_ICMP)
+ return 0;
+
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c
new file mode 100644
index 000000000..431c95646
--- /dev/null
+++ b/samples/bpf/sockex1_kern.c
@@ -0,0 +1,30 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} my_map SEC(".maps");
+
+SEC("socket1")
+int bpf_prog1(struct __sk_buff *skb)
+{
+ int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
+ long *value;
+
+ if (skb->pkt_type != PACKET_OUTGOING)
+ return 0;
+
+ value = bpf_map_lookup_elem(&my_map, &index);
+ if (value)
+ __sync_fetch_and_add(value, skb->len);
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c
new file mode 100644
index 000000000..9e8d39e24
--- /dev/null
+++ b/samples/bpf/sockex1_user.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "sock_example.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+int main(int ac, char **argv)
+{
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ int map_fd, prog_fd;
+ char filename[256];
+ int i, sock, err;
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return 1;
+
+ prog_fd = bpf_program__fd(prog);
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+
+ sock = open_raw_sock("lo");
+
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) == 0);
+
+ f = popen("ping -4 -c5 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ long long tcp_cnt, udp_cnt, icmp_cnt;
+ int key;
+
+ key = IPPROTO_TCP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
+
+ key = IPPROTO_UDP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
+
+ key = IPPROTO_ICMP;
+ assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
+
+ printf("TCP %lld UDP %lld ICMP %lld bytes\n",
+ tcp_cnt, udp_cnt, icmp_cnt);
+ sleep(1);
+ }
+
+ return 0;
+}
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
new file mode 100644
index 000000000..b7997541f
--- /dev/null
+++ b/samples/bpf/sockex2_kern.c
@@ -0,0 +1,223 @@
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_key_record {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u16 thoff;
+ __u8 ip_proto;
+};
+
+static inline int proto_ports_offset(__u64 proto)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ return 0;
+ case IPPROTO_AH:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+ return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+ & (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
+{
+ __u64 w0 = load_word(ctx, off);
+ __u64 w1 = load_word(ctx, off + 4);
+ __u64 w2 = load_word(ctx, off + 8);
+ __u64 w3 = load_word(ctx, off + 12);
+
+ return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+ struct flow_key_record *flow)
+{
+ __u64 verlen;
+
+ if (unlikely(ip_is_fragment(skb, nhoff)))
+ *ip_proto = 0;
+ else
+ *ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+ if (*ip_proto != IPPROTO_GRE) {
+ flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+ flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+ }
+
+ verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+ if (likely(verlen == 0x45))
+ nhoff += 20;
+ else
+ nhoff += (verlen & 0xF) << 2;
+
+ return nhoff;
+}
+
+static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
+ struct flow_key_record *flow)
+{
+ *ip_proto = load_byte(skb,
+ nhoff + offsetof(struct ipv6hdr, nexthdr));
+ flow->src = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, saddr));
+ flow->dst = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, daddr));
+ nhoff += sizeof(struct ipv6hdr);
+
+ return nhoff;
+}
+
+static inline bool flow_dissector(struct __sk_buff *skb,
+ struct flow_key_record *flow)
+{
+ __u64 nhoff = ETH_HLEN;
+ __u64 ip_proto;
+ __u64 proto = load_half(skb, 12);
+ int poff;
+
+ if (proto == ETH_P_8021AD) {
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == ETH_P_8021Q) {
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (likely(proto == ETH_P_IP))
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ else if (proto == ETH_P_IPV6)
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ else
+ return false;
+
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ };
+
+ __u64 gre_flags = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, flags));
+ __u64 gre_proto = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, proto));
+
+ if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+ break;
+
+ proto = gre_proto;
+ nhoff += 4;
+ if (gre_flags & GRE_CSUM)
+ nhoff += 4;
+ if (gre_flags & GRE_KEY)
+ nhoff += 4;
+ if (gre_flags & GRE_SEQ)
+ nhoff += 4;
+
+ if (proto == ETH_P_8021Q) {
+ proto = load_half(skb,
+ nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == ETH_P_IP)
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ else if (proto == ETH_P_IPV6)
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ else
+ return false;
+ break;
+ }
+ case IPPROTO_IPIP:
+ nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
+ break;
+ case IPPROTO_IPV6:
+ nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
+ break;
+ default:
+ break;
+ }
+
+ flow->ip_proto = ip_proto;
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ nhoff += poff;
+ flow->ports = load_word(skb, nhoff);
+ }
+
+ flow->thoff = (__u16) nhoff;
+
+ return true;
+}
+
+struct pair {
+ long packets;
+ long bytes;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, struct pair);
+ __uint(max_entries, 1024);
+} hash_map SEC(".maps");
+
+SEC("socket2")
+int bpf_prog2(struct __sk_buff *skb)
+{
+ struct flow_key_record flow = {};
+ struct pair *value;
+ u32 key;
+
+ if (!flow_dissector(skb, &flow))
+ return 0;
+
+ key = flow.dst;
+ value = bpf_map_lookup_elem(&hash_map, &key);
+ if (value) {
+ __sync_fetch_and_add(&value->packets, 1);
+ __sync_fetch_and_add(&value->bytes, skb->len);
+ } else {
+ struct pair val = {1, skb->len};
+
+ bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c
new file mode 100644
index 000000000..2c1847133
--- /dev/null
+++ b/samples/bpf/sockex2_user.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "sock_example.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
+int main(int ac, char **argv)
+{
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int map_fd, prog_fd;
+ char filename[256];
+ int i, sock, err;
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return 1;
+
+ prog_fd = bpf_program__fd(prog);
+ map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
+
+ sock = open_raw_sock("lo");
+
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
+ sizeof(prog_fd)) == 0);
+
+ f = popen("ping -4 -c5 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ int key = 0, next_key;
+ struct pair value;
+
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(map_fd, &next_key, &value);
+ printf("ip %s bytes %lld packets %lld\n",
+ inet_ntoa((struct in_addr){htonl(next_key)}),
+ value.bytes, value.packets);
+ key = next_key;
+ }
+ sleep(1);
+ }
+ return 0;
+}
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
new file mode 100644
index 000000000..b36350335
--- /dev/null
+++ b/samples/bpf/sockex3_kern.c
@@ -0,0 +1,293 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <uapi/linux/mpls.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+
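+/* __stringify() expands the PARSE_* constant first, so e.g. PROG(PARSE_IP)
+ * is placed in section "socket/3". User space parses the "socket/%d"
+ * section names to populate jmp_table (see sockex3_user.c).
+ */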
+#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 8);
+} jmp_table SEC(".maps");
+
+#define PARSE_VLAN 1
+#define PARSE_MPLS 2
+#define PARSE_IP 3
+#define PARSE_IPV6 4
+
+/* Protocol dispatch routine. It tail-calls next BPF program depending
+ * on eth proto. Note, we could have used ...
+ *
+ * bpf_tail_call(skb, &jmp_table, proto);
+ *
+ * ... but it would need a large prog_array and cannot be optimised given
+ * the map key is not static.
+ */
+static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
+{
+ switch (proto) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+ break;
+ case ETH_P_MPLS_UC:
+ case ETH_P_MPLS_MC:
+ bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+ break;
+ case ETH_P_IP:
+ bpf_tail_call(skb, &jmp_table, PARSE_IP);
+ break;
+ case ETH_P_IPV6:
+ bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+ break;
+ }
+}
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_key_record {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u32 ip_proto;
+};
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+ return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+ & (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
+{
+ __u64 w0 = load_word(ctx, off);
+ __u64 w1 = load_word(ctx, off + 4);
+ __u64 w2 = load_word(ctx, off + 8);
+ __u64 w3 = load_word(ctx, off + 12);
+
+ return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+struct globals {
+ struct flow_key_record flow;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, struct globals);
+ __uint(max_entries, 32);
+} percpu_map SEC(".maps");
+
+/* use a poor man's per_cpu array until native support is ready */
+static struct globals *this_cpu_globals(void)
+{
+ u32 key = bpf_get_smp_processor_id();
+
+ return bpf_map_lookup_elem(&percpu_map, &key);
+}
+
+/* some simple stats for user space consumption */
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct flow_key_record);
+ __type(value, struct pair);
+ __uint(max_entries, 1024);
+} hash_map SEC(".maps");
+
+static void update_stats(struct __sk_buff *skb, struct globals *g)
+{
+ struct flow_key_record key = g->flow;
+ struct pair *value;
+
+ value = bpf_map_lookup_elem(&hash_map, &key);
+ if (value) {
+ __sync_fetch_and_add(&value->packets, 1);
+ __sync_fetch_and_add(&value->bytes, skb->len);
+ } else {
+ struct pair val = {1, skb->len};
+
+ bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+ }
+}
+
+static __always_inline void parse_ip_proto(struct __sk_buff *skb,
+ struct globals *g, __u32 ip_proto)
+{
+ __u32 nhoff = skb->cb[0];
+ int poff;
+
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ };
+
+ __u32 gre_flags = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, flags));
+ __u32 gre_proto = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, proto));
+
+ if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+ break;
+
+ nhoff += 4;
+ if (gre_flags & GRE_CSUM)
+ nhoff += 4;
+ if (gre_flags & GRE_KEY)
+ nhoff += 4;
+ if (gre_flags & GRE_SEQ)
+ nhoff += 4;
+
+ skb->cb[0] = nhoff;
+ parse_eth_proto(skb, gre_proto);
+ break;
+ }
+ case IPPROTO_IPIP:
+ parse_eth_proto(skb, ETH_P_IP);
+ break;
+ case IPPROTO_IPV6:
+ parse_eth_proto(skb, ETH_P_IPV6);
+ break;
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ g->flow.ports = load_word(skb, nhoff);
+ case IPPROTO_ICMP:
+ g->flow.ip_proto = ip_proto;
+ update_stats(skb, g);
+ break;
+ default:
+ break;
+ }
+}
+
+PROG(PARSE_IP)(struct __sk_buff *skb)
+{
+ struct globals *g = this_cpu_globals();
+ __u32 nhoff, verlen, ip_proto;
+
+ if (!g)
+ return 0;
+
+ nhoff = skb->cb[0];
+
+ if (unlikely(ip_is_fragment(skb, nhoff)))
+ return 0;
+
+ ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+ if (ip_proto != IPPROTO_GRE) {
+ g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+ g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+ }
+
+ verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+ nhoff += (verlen & 0xF) << 2;
+
+ skb->cb[0] = nhoff;
+ parse_ip_proto(skb, g, ip_proto);
+ return 0;
+}
+
+PROG(PARSE_IPV6)(struct __sk_buff *skb)
+{
+ struct globals *g = this_cpu_globals();
+ __u32 nhoff, ip_proto;
+
+ if (!g)
+ return 0;
+
+ nhoff = skb->cb[0];
+
+ ip_proto = load_byte(skb,
+ nhoff + offsetof(struct ipv6hdr, nexthdr));
+ g->flow.src = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, saddr));
+ g->flow.dst = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, daddr));
+ nhoff += sizeof(struct ipv6hdr);
+
+ skb->cb[0] = nhoff;
+ parse_ip_proto(skb, g, ip_proto);
+ return 0;
+}
+
+PROG(PARSE_VLAN)(struct __sk_buff *skb)
+{
+ __u32 nhoff, proto;
+
+ nhoff = skb->cb[0];
+
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ skb->cb[0] = nhoff;
+
+ parse_eth_proto(skb, proto);
+
+ return 0;
+}
+
+PROG(PARSE_MPLS)(struct __sk_buff *skb)
+{
+ __u32 nhoff, label;
+
+ nhoff = skb->cb[0];
+
+ label = load_word(skb, nhoff);
+ nhoff += sizeof(struct mpls_label);
+ skb->cb[0] = nhoff;
+
+ if (label & MPLS_LS_S_MASK) {
+ __u8 verlen = load_byte(skb, nhoff);
+
+ if ((verlen & 0xF0) == 0x40)
+ parse_eth_proto(skb, ETH_P_IP);
+ else
+ parse_eth_proto(skb, ETH_P_IPV6);
+ } else {
+ parse_eth_proto(skb, ETH_P_MPLS_UC);
+ }
+
+ return 0;
+}
+
+SEC("socket/0")
+int main_prog(struct __sk_buff *skb)
+{
+ __u32 nhoff = ETH_HLEN;
+ __u32 proto = load_half(skb, 12);
+
+ skb->cb[0] = nhoff;
+ parse_eth_proto(skb, proto);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
new file mode 100644
index 000000000..cd6fa79df
--- /dev/null
+++ b/samples/bpf/sockex3_user.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <assert.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "sock_example.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+struct flow_key_record {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u32 ip_proto;
+};
+
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
+int main(int argc, char **argv)
+{
+ int i, sock, key, fd, main_prog_fd, jmp_table_fd, hash_map_fd;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ const char *section;
+ char filename[256];
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ jmp_table_fd = bpf_object__find_map_fd_by_name(obj, "jmp_table");
+ hash_map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
+ if (jmp_table_fd < 0 || hash_map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ fd = bpf_program__fd(prog);
+
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "socket/%d", &key) != 1) {
+ fprintf(stderr, "ERROR: finding prog failed\n");
+ goto cleanup;
+ }
+
+ if (key == 0)
+ main_prog_fd = fd;
+ else
+ bpf_map_update_elem(jmp_table_fd, &key, &fd, BPF_ANY);
+ }
+
+ sock = open_raw_sock("lo");
+
+ /* attach BPF program to socket */
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &main_prog_fd,
+ sizeof(__u32)) == 0);
+
+ if (argc > 1)
+ f = popen("ping -4 -c5 localhost", "r");
+ else
+ f = popen("netperf -l 4 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ struct flow_key_record key = {}, next_key;
+ struct pair value;
+
+ sleep(1);
+ printf("IP src.port -> dst.port bytes packets\n");
+ while (bpf_map_get_next_key(hash_map_fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(hash_map_fd, &next_key, &value);
+ printf("%s.%05d -> %s.%05d %12lld %12lld\n",
+ inet_ntoa((struct in_addr){htonl(next_key.src)}),
+ next_key.port16[0],
+ inet_ntoa((struct in_addr){htonl(next_key.dst)}),
+ next_key.port16[1],
+ value.bytes, value.packets);
+ key = next_key;
+ }
+ }
+
+cleanup:
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
new file mode 100644
index 000000000..455da7731
--- /dev/null
+++ b/samples/bpf/spintest_kern.c
@@ -0,0 +1,69 @@
+/* Copyright (c) 2016, Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, long);
+ __type(value, long);
+ __uint(max_entries, 1024);
+} my_map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(long));
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, 1024);
+} my_map2 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, 10000);
+} stackmap SEC(".maps");
+
+#define PROG(foo) \
+int foo(struct pt_regs *ctx) \
+{ \
+ long v = PT_REGS_IP(ctx), *val; \
+\
+ val = bpf_map_lookup_elem(&my_map, &v); \
+ bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
+ bpf_map_update_elem(&my_map2, &v, &v, BPF_ANY); \
+ bpf_map_delete_elem(&my_map2, &v); \
+ bpf_get_stackid(ctx, &stackmap, BPF_F_REUSE_STACKID); \
+ return 0; \
+}
+
+/* add kprobes to all possible *spin* functions */
+SEC("kprobe/spin_unlock")PROG(p1)
+SEC("kprobe/spin_lock")PROG(p2)
+SEC("kprobe/mutex_spin_on_owner")PROG(p3)
+SEC("kprobe/rwsem_spin_on_owner")PROG(p4)
+SEC("kprobe/spin_unlock_irqrestore")PROG(p5)
+SEC("kprobe/_raw_spin_unlock_irqrestore")PROG(p6)
+SEC("kprobe/_raw_spin_unlock_bh")PROG(p7)
+SEC("kprobe/_raw_spin_unlock")PROG(p8)
+SEC("kprobe/_raw_spin_lock_irqsave")PROG(p9)
+SEC("kprobe/_raw_spin_trylock_bh")PROG(p10)
+SEC("kprobe/_raw_spin_lock_irq")PROG(p11)
+SEC("kprobe/_raw_spin_trylock")PROG(p12)
+SEC("kprobe/_raw_spin_lock")PROG(p13)
+SEC("kprobe/_raw_spin_lock_bh")PROG(p14)
+/* and to inner bpf helpers */
+SEC("kprobe/htab_map_update_elem")PROG(p15)
+SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16)
+SEC("kprobe/htab_map_alloc")PROG(p17)
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
new file mode 100644
index 000000000..aadac14f7
--- /dev/null
+++ b/samples/bpf/spintest_user.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <assert.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include "trace_helpers.h"
+
+int main(int ac, char **argv)
+{
+ char filename[256], symbol[256];
+ struct bpf_object *obj = NULL;
+ struct bpf_link *links[20];
+ long key, next_key, value;
+ struct bpf_program *prog;
+ int map_fd, i, j = 0;
+ const char *section;
+ struct ksym *sym;
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ return 2;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "kprobe/%s", symbol) != 1)
+ continue;
+
+ /* Attach prog only when symbol exists */
+ if (ksym_get_addr(symbol)) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+ }
+
+ for (i = 0; i < 5; i++) {
+ key = 0;
+ printf("kprobing funcs:");
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(map_fd, &next_key, &value);
+ assert(next_key == value);
+ sym = ksym_search(value);
+ key = next_key;
+ if (!sym) {
+ printf("ksym not found. Is kallsyms loaded?\n");
+ continue;
+ }
+
+ printf(" %s", sym->name);
+ }
+ if (key)
+ printf("\n");
+ key = 0;
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0)
+ bpf_map_delete_elem(map_fd, &next_key);
+ sleep(1);
+ }
+
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ bpf_object__close(obj);
+ return 0;
+}
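The drain loop above leans on a libbpf idiom worth spelling out: when the key
passed to bpf_map_get_next_key() is absent from a hash map, the kernel returns
the first key still present, so repeatedly asking for "the key after an absent
key" and deleting the result empties the map. A minimal standalone sketch of
that idiom (hypothetical drain_map() helper, assuming long-sized keys as in
my_map):

    #include <bpf/bpf.h>

    static void drain_map(int map_fd)
    {
        long key = 0, next_key;

        /* key 0 is never a valid kernel text address, so each call
         * yields the current first entry, which is then deleted */
        while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0)
            bpf_map_delete_elem(map_fd, &next_key);
    }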
diff --git a/samples/bpf/syscall_nrs.c b/samples/bpf/syscall_nrs.c
new file mode 100644
index 000000000..88f940052
--- /dev/null
+++ b/samples/bpf/syscall_nrs.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <uapi/linux/unistd.h>
+#include <linux/kbuild.h>
+
+#define SYSNR(_NR) DEFINE(SYS ## _NR, _NR)
+
+void syscall_defines(void)
+{
+ COMMENT("Linux system call numbers.");
+ SYSNR(__NR_write);
+ SYSNR(__NR_read);
+#ifdef __NR_mmap2
+ SYSNR(__NR_mmap2);
+#endif
+#ifdef __NR_mmap
+ SYSNR(__NR_mmap);
+#endif
+
+}
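syscall_nrs.c is never loaded as BPF; the Makefile feeds it through the
kernel's kbuild asm-offsets machinery, which turns every DEFINE() into a plain
cpp macro in a generated syscall_nrs.h that the tracing samples include. On
x86_64 the generated header would look roughly like this (a sketch; the values
are the x86_64 syscall numbers):

    #define SYS__NR_read 0 /* __NR_read */
    #define SYS__NR_write 1 /* __NR_write */
    #define SYS__NR_mmap 9 /* __NR_mmap */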
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
new file mode 100644
index 000000000..50231c2ef
--- /dev/null
+++ b/samples/bpf/syscall_tp_kern.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 Facebook
+ */
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct syscalls_enter_open_args {
+ unsigned long long unused;
+ long syscall_nr;
+ long filename_ptr;
+ long flags;
+ long mode;
+};
+
+struct syscalls_exit_open_args {
+ unsigned long long unused;
+ long syscall_nr;
+ long ret;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} enter_open_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} exit_open_map SEC(".maps");
+
+static __always_inline void count(void *map)
+{
+ u32 key = 0;
+ u32 *value, init_val = 1;
+
+ value = bpf_map_lookup_elem(map, &key);
+ if (value)
+ *value += 1;
+ else
+ bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST);
+}
+
+SEC("tracepoint/syscalls/sys_enter_open")
+int trace_enter_open(struct syscalls_enter_open_args *ctx)
+{
+ count(&enter_open_map);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_openat")
+int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
+{
+ count(&enter_open_map);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_open")
+int trace_enter_exit(struct syscalls_exit_open_args *ctx)
+{
+ count(&exit_open_map);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_openat")
+int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
+{
+ count(&exit_open_map);
+ return 0;
+}
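The two context structs above are hand-written mirrors of the tracepoint ABI:
the leading unsigned long long covers the common_* header fields, and the
members after it must match the tracepoint's own layout, which can be checked
against the format file (path assumes debugfs is mounted in the usual place):

    cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/format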
diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
new file mode 100644
index 000000000..7a788bb83
--- /dev/null
+++ b/samples/bpf/syscall_tp_user.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 Facebook
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <errno.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+
+/* This program verifies BPF attachment to the sys_enter_* and sys_exit_*
+ * tracepoints; it requires a kernel built with CONFIG_FTRACE_SYSCALLS.
+ */
+
+static void usage(const char *cmd)
+{
+ printf("USAGE: %s [-i num_progs] [-h]\n", cmd);
+ printf(" -i num_progs # number of progs of the test\n");
+ printf(" -h # help\n");
+}
+
+static void verify_map(int map_id)
+{
+ __u32 key = 0;
+ __u32 val;
+
+ if (bpf_map_lookup_elem(map_id, &key, &val) != 0) {
+ fprintf(stderr, "map_lookup failed: %s\n", strerror(errno));
+ return;
+ }
+ if (val == 0) {
+ fprintf(stderr, "failed: map #%d returns value 0\n", map_id);
+ return;
+ }
+
+ printf("verify map:%d val: %d\n", map_id, val);
+
+ val = 0;
+ if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) {
+ fprintf(stderr, "map_update failed: %s\n", strerror(errno));
+ return;
+ }
+}
+
+static int test(char *filename, int num_progs)
+{
+ int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+ struct bpf_link *links[num_progs * 4];
+ struct bpf_object *objs[num_progs];
+ struct bpf_program *prog;
+
+ for (i = 0; i < num_progs; i++) {
+ objs[i] = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(objs[i])) {
+ fprintf(stderr, "opening BPF object file failed\n");
+ objs[i] = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(objs[i])) {
+ fprintf(stderr, "loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map0_fds[i] = bpf_object__find_map_fd_by_name(objs[i],
+ "enter_open_map");
+ map1_fds[i] = bpf_object__find_map_fd_by_name(objs[i],
+ "exit_open_map");
+ if (map0_fds[i] < 0 || map1_fds[i] < 0) {
+ fprintf(stderr, "finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, objs[i]) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+ printf("prog #%d: map ids %d %d\n", i, map0_fds[i], map1_fds[i]);
+ }
+
+ /* libbpf attaches each tracepoint program via perf_event_open with
+ * pid = -1 and cpu = 0, which permits attached bpf execution on
+ * all cpus for all pids. bpf program execution ignores cpu
+ * affinity.
+ */
+ /* trigger some "open" operations */
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "open failed: %s\n", strerror(errno));
+ return 1;
+ }
+ close(fd);
+
+ /* verify the map */
+ for (i = 0; i < num_progs; i++) {
+ verify_map(map0_fds[i]);
+ verify_map(map1_fds[i]);
+ }
+
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ for (i--; i >= 0; i--)
+ bpf_object__close(objs[i]);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int opt, num_progs = 1;
+ char filename[256];
+
+ while ((opt = getopt(argc, argv, "i:h")) != -1) {
+ switch (opt) {
+ case 'i':
+ num_progs = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage(argv[0]);
+ return 0;
+ }
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ return test(filename, num_progs);
+}
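Assuming the usual samples/bpf convention that the Makefile builds a
syscall_tp binary next to syscall_tp_kern.o, a run that loads two independent
copies of the object and verifies both pairs of maps looks like:

    sudo ./syscall_tp -i 2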
diff --git a/samples/bpf/task_fd_query_kern.c b/samples/bpf/task_fd_query_kern.c
new file mode 100644
index 000000000..186ac0a79
--- /dev/null
+++ b/samples/bpf/task_fd_query_kern.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("kprobe/blk_mq_start_request")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("kretprobe/__blk_account_io_done")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
new file mode 100644
index 000000000..a33d74bd3
--- /dev/null
+++ b/samples/bpf/task_fd_query_user.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/bpf.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <linux/perf_event.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+static struct bpf_program *progs[2];
+static struct bpf_link *links[2];
+
+#define CHECK_PERROR_RET(condition) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("FAIL: %s:\n", __func__); \
+ perror(" "); \
+ return -1; \
+ } \
+})
+
+#define CHECK_AND_RET(condition) ({ \
+ int __ret = !!(condition); \
+ if (__ret) \
+ return -1; \
+})
+
+static __u64 ptr_to_u64(void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+#define PMU_TYPE_FILE "/sys/bus/event_source/devices/%s/type"
+static int bpf_find_probe_type(const char *event_type)
+{
+ char buf[256];
+ int fd, ret;
+
+ ret = snprintf(buf, sizeof(buf), PMU_TYPE_FILE, event_type);
+ CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
+
+ fd = open(buf, O_RDONLY);
+ CHECK_PERROR_RET(fd < 0);
+
+ ret = read(fd, buf, sizeof(buf));
+ close(fd);
+ CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
+
+ errno = 0;
+ ret = (int)strtol(buf, NULL, 10);
+ CHECK_PERROR_RET(errno);
+ return ret;
+}
+
+#define PMU_RETPROBE_FILE "/sys/bus/event_source/devices/%s/format/retprobe"
+static int bpf_get_retprobe_bit(const char *event_type)
+{
+ char buf[256];
+ int fd, ret;
+
+ ret = snprintf(buf, sizeof(buf), PMU_RETPROBE_FILE, event_type);
+ CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
+
+ fd = open(buf, O_RDONLY);
+ CHECK_PERROR_RET(fd < 0);
+
+ ret = read(fd, buf, sizeof(buf));
+ close(fd);
+ CHECK_PERROR_RET(ret < 0 || ret >= sizeof(buf));
+ CHECK_PERROR_RET(strlen(buf) < strlen("config:"));
+
+ errno = 0;
+ ret = (int)strtol(buf + strlen("config:"), NULL, 10);
+ CHECK_PERROR_RET(errno);
+ return ret;
+}
+
+static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
+ __u32 expected_fd_type)
+{
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ int err, event_fd;
+ char buf[256];
+
+ len = sizeof(buf);
+ event_fd = bpf_link__fd(links[link_idx]);
+ err = bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
+ &prog_id, &fd_type, &probe_offset,
+ &probe_addr);
+ if (err < 0) {
+ printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
+ __func__, link_idx, fn_name);
+ perror(" :");
+ return -1;
+ }
+ if (strcmp(buf, fn_name) != 0 ||
+ fd_type != expected_fd_type ||
+ probe_offset != 0x0 || probe_addr != 0x0) {
+ printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
+ link_idx);
+ printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
+ " probe_addr: 0x%llx\n",
+ buf, fd_type, probe_offset, probe_addr);
+ return -1;
+ }
+ return 0;
+}
+
+static int test_nondebug_fs_kuprobe_common(const char *event_type,
+ const char *name, __u64 offset, __u64 addr, bool is_return,
+ char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
+ __u64 *probe_offset, __u64 *probe_addr)
+{
+ int is_return_bit = bpf_get_retprobe_bit(event_type);
+ int type = bpf_find_probe_type(event_type);
+ struct perf_event_attr attr = {};
+ struct bpf_link *link;
+ int fd, err = -1;
+
+ if (type < 0 || is_return_bit < 0) {
+ printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
+ __func__, type, is_return_bit);
+ return err;
+ }
+
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ if (is_return)
+ attr.config |= 1 << is_return_bit;
+
+ if (name) {
+ attr.config1 = ptr_to_u64((void *)name);
+ attr.config2 = offset;
+ } else {
+ attr.config1 = 0;
+ attr.config2 = addr;
+ }
+ attr.size = sizeof(attr);
+ attr.type = type;
+
+ fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
+ link = bpf_program__attach_perf_event(progs[0], fd);
+ if (libbpf_get_error(link)) {
+ printf("ERROR: bpf_program__attach_perf_event failed\n");
+ link = NULL;
+ close(fd);
+ goto cleanup;
+ }
+
+ CHECK_PERROR_RET(bpf_task_fd_query(getpid(), fd, 0, buf, buf_len,
+ prog_id, fd_type, probe_offset, probe_addr) < 0);
+ err = 0;
+
+cleanup:
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int test_nondebug_fs_probe(const char *event_type, const char *name,
+ __u64 offset, __u64 addr, bool is_return,
+ __u32 expected_fd_type,
+ __u32 expected_ret_fd_type,
+ char *buf, __u32 buf_len)
+{
+ __u64 probe_offset, probe_addr;
+ __u32 prog_id, fd_type;
+ int err;
+
+ err = test_nondebug_fs_kuprobe_common(event_type, name,
+ offset, addr, is_return,
+ buf, &buf_len, &prog_id,
+ &fd_type, &probe_offset,
+ &probe_addr);
+ if (err < 0) {
+ printf("FAIL: %s, "
+ "for name %s, offset 0x%llx, addr 0x%llx, is_return %d\n",
+ __func__, name ? name : "", offset, addr, is_return);
+ perror(" :");
+ return -1;
+ }
+ if ((is_return && fd_type != expected_ret_fd_type) ||
+ (!is_return && fd_type != expected_fd_type)) {
+ printf("FAIL: %s, incorrect fd_type %u\n",
+ __func__, fd_type);
+ return -1;
+ }
+ if (name) {
+ if (strcmp(name, buf) != 0) {
+ printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
+ return -1;
+ }
+ if (probe_offset != offset) {
+ printf("FAIL: %s, incorrect probe_offset 0x%llx\n",
+ __func__, probe_offset);
+ return -1;
+ }
+ } else {
+ if (buf_len != 0) {
+ printf("FAIL: %s, incorrect buf %p\n",
+ __func__, buf);
+ return -1;
+ }
+
+ if (probe_addr != addr) {
+ printf("FAIL: %s, incorrect probe_addr 0x%llx\n",
+ __func__, probe_addr);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
+{
+ char buf[256], event_alias[sizeof("test_1234567890")];
+ const char *event_type = "uprobe";
+ struct perf_event_attr attr = {};
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ int err = -1, res, kfd, efd;
+ struct bpf_link *link;
+ ssize_t bytes;
+
+ snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events",
+ event_type);
+ kfd = open(buf, O_WRONLY | O_TRUNC, 0);
+ CHECK_PERROR_RET(kfd < 0);
+
+ res = snprintf(event_alias, sizeof(event_alias), "test_%d", getpid());
+ CHECK_PERROR_RET(res < 0 || res >= sizeof(event_alias));
+
+ res = snprintf(buf, sizeof(buf), "%c:%ss/%s %s:0x%lx",
+ is_return ? 'r' : 'p', event_type, event_alias,
+ binary_path, offset);
+ CHECK_PERROR_RET(res < 0 || res >= sizeof(buf));
+ CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);
+
+ close(kfd);
+ kfd = -1;
+
+ snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s/id",
+ event_type, event_alias);
+ efd = open(buf, O_RDONLY, 0);
+ CHECK_PERROR_RET(efd < 0);
+
+ bytes = read(efd, buf, sizeof(buf));
+ CHECK_PERROR_RET(bytes <= 0 || bytes >= sizeof(buf));
+ close(efd);
+ buf[bytes] = '\0';
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ link = bpf_program__attach_perf_event(progs[0], kfd);
+ if (libbpf_get_error(link)) {
+ printf("ERROR: bpf_program__attach_perf_event failed\n");
+ link = NULL;
+ close(kfd);
+ goto cleanup;
+ }
+
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), kfd, 0, buf, &len,
+ &prog_id, &fd_type, &probe_offset,
+ &probe_addr);
+ if (err < 0) {
+ printf("FAIL: %s, binary_path %s\n", __func__, binary_path);
+ perror(" :");
+ return -1;
+ }
+ if ((is_return && fd_type != BPF_FD_TYPE_URETPROBE) ||
+ (!is_return && fd_type != BPF_FD_TYPE_UPROBE)) {
+ printf("FAIL: %s, incorrect fd_type %u\n", __func__,
+ fd_type);
+ return -1;
+ }
+ if (strcmp(binary_path, buf) != 0) {
+ printf("FAIL: %s, incorrect buf %s\n", __func__, buf);
+ return -1;
+ }
+ if (probe_offset != offset) {
+ printf("FAIL: %s, incorrect probe_offset 0x%llx\n", __func__,
+ probe_offset);
+ return -1;
+ }
+ err = 0;
+
+cleanup:
+ bpf_link__destroy(link);
+ return err;
+}
+
+int main(int argc, char **argv)
+{
+ extern char __executable_start;
+ char filename[256], buf[256];
+ __u64 uprobe_file_offset;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int i = 0, err = -1;
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ return err;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return err;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ progs[i] = prog;
+ links[i] = bpf_program__attach(progs[i]);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
+ }
+
+ /* test two functions in the corresponding *_kern.c file */
+ CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
+ BPF_FD_TYPE_KPROBE));
+ CHECK_AND_RET(test_debug_fs_kprobe(1, "__blk_account_io_done",
+ BPF_FD_TYPE_KRETPROBE));
+
+ /* test nondebug fs kprobe */
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
+ false, BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ buf, sizeof(buf)));
+#ifdef __x86_64__
+ /* set a kprobe on "bpf_check + 0x5", which is x64 specific */
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x5, 0x0,
+ false, BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ buf, sizeof(buf)));
+#endif
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", "bpf_check", 0x0, 0x0,
+ true, BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ buf, sizeof(buf)));
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
+ ksym_get_addr("bpf_check"), false,
+ BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ buf, sizeof(buf)));
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
+ ksym_get_addr("bpf_check"), false,
+ BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ NULL, 0));
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
+ ksym_get_addr("bpf_check"), true,
+ BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ buf, sizeof(buf)));
+ CHECK_AND_RET(test_nondebug_fs_probe("kprobe", NULL, 0x0,
+ ksym_get_addr("bpf_check"), true,
+ BPF_FD_TYPE_KPROBE,
+ BPF_FD_TYPE_KRETPROBE,
+ NULL, 0));
+
+ /* test nondebug fs uprobe */
+ /* the calculation of uprobe file offset is based on gcc 7.3.1 on x64
+ * and the default linker script, which defines __executable_start as
+ * the start of the .text section. The calculation could be different
+ * on different systems with different compilers. The right way is
+ * to parse the ELF file. We took a shortcut here.
+ */
+ uprobe_file_offset = (unsigned long)main - (unsigned long)&__executable_start;
+ CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
+ uprobe_file_offset, 0x0, false,
+ BPF_FD_TYPE_UPROBE,
+ BPF_FD_TYPE_URETPROBE,
+ buf, sizeof(buf)));
+ CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
+ uprobe_file_offset, 0x0, true,
+ BPF_FD_TYPE_UPROBE,
+ BPF_FD_TYPE_URETPROBE,
+ buf, sizeof(buf)));
+
+ /* test debug fs uprobe */
+ CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
+ false));
+ CHECK_AND_RET(test_debug_fs_uprobe((char *)argv[0], uprobe_file_offset,
+ true));
+ err = 0;
+
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
+ return err;
+}
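All of the checks above funnel through a single libbpf call,
bpf_task_fd_query(), which maps a (pid, event fd) pair back to whatever the fd
is attached to. A minimal hedged usage sketch (event_fd stands in for any perf
event or kprobe fd the caller holds):

    /* event_fd is assumed to be a perf event/kprobe fd owned by this task */
    static int show_attachment(int event_fd)
    {
        __u64 probe_offset, probe_addr;
        __u32 prog_id, fd_type;
        char buf[256];
        __u32 len = sizeof(buf);

        if (bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
                              &prog_id, &fd_type, &probe_offset,
                              &probe_addr))
                return -1;
        printf("fd attached to %s (fd_type %u)\n", buf, fd_type);
        return 0;
    }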
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
new file mode 100755
index 000000000..37d95ef3c
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+[[ -z $TC ]] && TC='tc'
+[[ -z $IP ]] && IP='ip'
+
+REDIRECT_USER='./tc_l2_redirect'
+REDIRECT_BPF='./tc_l2_redirect_kern.o'
+
+RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
+IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
+
+function config_common {
+ local tun_type=$1
+
+ $IP netns add ns1
+ $IP netns add ns2
+ $IP link add ve1 type veth peer name vens1
+ $IP link add ve2 type veth peer name vens2
+ $IP link set dev ve1 up
+ $IP link set dev ve2 up
+ $IP link set dev ve1 mtu 1500
+ $IP link set dev ve2 mtu 1500
+ $IP link set dev vens1 netns ns1
+ $IP link set dev vens2 netns ns2
+
+ $IP -n ns1 link set dev lo up
+ $IP -n ns1 link set dev vens1 up
+ $IP -n ns1 addr add 10.1.1.101/24 dev vens1
+ $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad
+ $IP -n ns1 route add default via 10.1.1.1 dev vens1
+ $IP -n ns1 route add default via 2401:db01::1 dev vens1
+
+ $IP -n ns2 link set dev lo up
+ $IP -n ns2 link set dev vens2 up
+ $IP -n ns2 addr add 10.2.1.102/24 dev vens2
+ $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad
+ $IP -n ns2 addr add 10.10.1.102 dev lo
+ $IP -n ns2 addr add 2401:face::66/64 dev lo nodad
+ $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1
+ $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1
+ $IP -n ns2 link set dev ipt2 up
+ $IP -n ns2 link set dev ip6t2 up
+ $IP netns exec ns2 $TC qdisc add dev vens2 clsact
+ $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip
+ if [[ $tun_type == "ipip" ]]; then
+ $IP -n ns2 route add 10.1.1.0/24 dev ipt2
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0
+ else
+ $IP -n ns2 route add 10.1.1.0/24 dev ip6t2
+ $IP -n ns2 route add 2401:db01::/64 dev ip6t2
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0
+ fi
+
+ $IP addr add 10.1.1.1/24 dev ve1
+ $IP addr add 2401:db01::1/64 dev ve1 nodad
+ $IP addr add 10.2.1.1/24 dev ve2
+ $IP addr add 2401:db02::1/64 dev ve2 nodad
+
+ $TC qdisc add dev ve2 clsact
+ $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward
+
+ sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ sysctl -q -w net.ipv6.conf.all.forwarding=1
+}
+
+function cleanup {
+ set +e
+ [[ -z $DEBUG ]] || set +x
+ $IP netns delete ns1 >& /dev/null
+ $IP netns delete ns2 >& /dev/null
+ $IP link del ve1 >& /dev/null
+ $IP link del ve2 >& /dev/null
+ $IP link del ipt >& /dev/null
+ $IP link del ip6t >& /dev/null
+ sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
+ sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
+ rm -f /sys/fs/bpf/tc/globals/tun_iface
+ [[ -z $DEBUG ]] || set -x
+ set -e
+}
+
+function l2_to_ipip {
+ echo -n "l2_to_ipip $1: "
+
+ local dir=$1
+
+ config_common ipip
+
+ $IP link add ipt type ipip external
+ $IP link set dev ipt up
+ sysctl -q -w net.ipv4.conf.ipt.rp_filter=0
+ sysctl -q -w net.ipv4.conf.ipt.forwarding=1
+
+ if [[ $dir == "egress" ]]; then
+ $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+ $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+ sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+ else
+ $TC qdisc add dev ve1 clsact
+ $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
+ fi
+
+ $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex)
+
+ $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+
+ if [[ $dir == "egress" ]]; then
+ # test direct egress to ve2 (i.e. not forwarding from
+ # ve1 to ve2).
+ ping -c1 10.10.1.102 >& /dev/null
+ fi
+
+ cleanup
+
+ echo "OK"
+}
+
+function l2_to_ip6tnl {
+ echo -n "l2_to_ip6tnl $1: "
+
+ local dir=$1
+
+ config_common ip6tnl
+
+ $IP link add ip6t type ip6tnl mode any external
+ $IP link set dev ip6t up
+ sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0
+ sysctl -q -w net.ipv4.conf.ip6t.forwarding=1
+
+ if [[ $dir == "egress" ]]; then
+ $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
+ $IP route add 2401:face::/64 via 2401:db02::66 dev ve2
+ $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+ sysctl -q -w net.ipv4.conf.ve1.forwarding=1
+ else
+ $TC qdisc add dev ve1 clsact
+ $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
+ fi
+
+ $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex)
+
+ $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
+ $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null
+
+ if [[ $dir == "egress" ]]; then
+ # test direct egress to ve2 (i.e. not forwarding from
+ # ve1 to ve2).
+ ping -c1 10.10.1.102 >& /dev/null
+ ping -6 -c1 2401:face::66 >& /dev/null
+ fi
+
+ cleanup
+
+ echo "OK"
+}
+
+cleanup
+test_names="l2_to_ipip l2_to_ip6tnl"
+test_dirs="ingress egress"
+if [[ $# -ge 2 ]]; then
+ test_names=$1
+ test_dirs=$2
+elif [[ $# -ge 1 ]]; then
+ test_names=$1
+fi
+
+for t in $test_names; do
+ for d in $test_dirs; do
+ $t $d
+ done
+done
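By default the script runs all four combinations; the optional positional
arguments narrow the run, the first selecting the test function and the second
the direction:

    sudo ./tc_l2_redirect.sh                      # all tests
    sudo ./tc_l2_redirect.sh l2_to_ipip           # one tunnel type, both directions
    sudo ./tc_l2_redirect.sh l2_to_ip6tnl egress  # one specific combination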
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
new file mode 100644
index 000000000..fd2fa0004
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
+#include <bpf/bpf_helpers.h>
+
+#define _htonl __builtin_bswap32
+
+#define PIN_GLOBAL_NS 2
+struct bpf_elf_map {
+ __u32 type;
+ __u32 size_key;
+ __u32 size_value;
+ __u32 max_elem;
+ __u32 flags;
+ __u32 id;
+ __u32 pinning;
+};
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_proto;
+};
+
+struct bpf_elf_map SEC("maps") tun_iface = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .size_key = sizeof(int),
+ .size_value = sizeof(int),
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
+
+static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr)
+{
+ if (eth_proto == htons(ETH_P_IP))
+ return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100);
+ else if (eth_proto == htons(ETH_P_IPV6))
+ return (daddr == _htonl(0x2401face));
+
+ return false;
+}
+
+SEC("l2_to_iptun_ingress_forward")
+int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n";
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (iph->protocol != IPPROTO_IPIP)
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex,
+ _htonl(iph->daddr));
+ return bpf_redirect(*ifindex, BPF_F_INGRESS);
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n";
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (ip6h->nexthdr != IPPROTO_IPIP &&
+ ip6h->nexthdr != IPPROTO_IPV6)
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex,
+ _htonl(ip6h->daddr.s6_addr32[0]),
+ _htonl(ip6h->daddr.s6_addr32[3]));
+ return bpf_redirect(*ifindex, BPF_F_INGRESS);
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("l2_to_iptun_ingress_redirect")
+int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+ struct iphdr *iph = data + sizeof(*eth);
+ __be32 daddr;
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ daddr = iph->daddr;
+
+ if (!is_vip_addr(eth->h_proto, daddr))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex);
+ } else {
+ return TC_ACT_OK;
+ }
+
+ tkey.tunnel_id = 10000;
+ tkey.tunnel_ttl = 64;
+ tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
+ return bpf_redirect(*ifindex, 0);
+}
+
+SEC("l2_to_ip6tun_ingress_redirect")
+int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+ int key = 0, *ifindex;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ ifindex = bpf_map_lookup_elem(&tun_iface, &key);
+ if (!ifindex)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (!is_vip_addr(eth->h_proto, iph->daddr))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr),
+ *ifindex);
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n";
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+ return TC_ACT_OK;
+
+ bpf_trace_printk(fmt6, sizeof(fmt6),
+ _htonl(ip6h->daddr.s6_addr32[0]), *ifindex);
+ } else {
+ return TC_ACT_OK;
+ }
+
+ tkey.tunnel_id = 10000;
+ tkey.tunnel_ttl = 64;
+ /* 2401:db02:0:0:0:0:0:66 */
+ tkey.remote_ipv6[0] = _htonl(0x2401db02);
+ tkey.remote_ipv6[1] = 0;
+ tkey.remote_ipv6[2] = 0;
+ tkey.remote_ipv6[3] = _htonl(0x00000066);
+ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
+ return bpf_redirect(*ifindex, 0);
+}
+
+SEC("drop_non_tun_vip")
+int _drop_non_tun_vip(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key tkey = {};
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ void *data_end = (void *)(long)skb->data_end;
+
+ if (data + sizeof(*eth) > data_end)
+ return TC_ACT_OK;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*iph) > data_end)
+ return TC_ACT_OK;
+
+ if (is_vip_addr(eth->h_proto, iph->daddr))
+ return TC_ACT_SHOT;
+ } else if (eth->h_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
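The IPv4 arm of is_vip_addr() is a hand-rolled prefix match: 0x0a0a0100 is
10.10.1.0 and the 0xffffff00 mask keeps the top 24 bits, so the test reads
"daddr is in 10.10.1.0/24", the VIP range the shell script puts on ns2's
loopback. The same arithmetic as a standalone userspace check (sketch):

    #include <assert.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t daddr = htonl(0x0a0a0166);   /* 10.10.1.102 */

        /* mask to /24 and compare against 10.10.1.0 */
        assert((htonl(0xffffff00) & daddr) == htonl(0x0a0a0100));
        return 0;
    }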
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c
new file mode 100644
index 000000000..d11a6e1e9
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_user.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+
+static void usage(void)
+{
+ printf("Usage: tc_l2_ipip_redirect [...]\n");
+ printf(" -U <file> Update an already pinned BPF array\n");
+ printf(" -i <ifindex> Interface index\n");
+ printf(" -h Display this help\n");
+}
+
+int main(int argc, char **argv)
+{
+ const char *pinned_file = NULL;
+ int ifindex = -1;
+ int array_key = 0;
+ int array_fd = -1;
+ int ret = -1;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "F:U:i:")) != -1) {
+ switch (opt) {
+ /* General args */
+ case 'U':
+ pinned_file = optarg;
+ break;
+ case 'i':
+ ifindex = atoi(optarg);
+ break;
+ default:
+ usage();
+ goto out;
+ }
+ }
+
+ if (ifindex < 0 || !pinned_file) {
+ usage();
+ goto out;
+ }
+
+ array_fd = bpf_obj_get(pinned_file);
+ if (array_fd < 0) {
+ fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
+ pinned_file, strerror(errno), errno);
+ goto out;
+ }
+
+ /* store the tunnel ifindex in slot 0 of the pinned array */
+ ret = bpf_map_update_elem(array_fd, &array_key, &ifindex, 0);
+ if (ret) {
+ perror("bpf_map_update_elem");
+ goto out;
+ }
+
+out:
+ if (array_fd != -1)
+ close(array_fd);
+ return ret;
+}
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
new file mode 100644
index 000000000..e9356130f
--- /dev/null
+++ b/samples/bpf/tcbpf1_kern.c
@@ -0,0 +1,91 @@
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/in.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/filter.h>
+#include <uapi/linux/pkt_cls.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+/* compiler workaround */
+#define _htonl __builtin_bswap32
+
+static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
+{
+ bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
+}
+
+#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
+#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
+
+static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
+{
+ __u8 old_tos = load_byte(skb, TOS_OFF);
+
+ bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
+ bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
+}
+
+#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
+#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
+
+#define IS_PSEUDO 0x10
+
+static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
+{
+ __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
+
+ bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
+ bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
+ bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
+}
+
+#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
+static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
+{
+ __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
+
+ bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
+ bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
+}
+
+SEC("classifier")
+int bpf_prog1(struct __sk_buff *skb)
+{
+ __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
+
+ if (proto == IPPROTO_TCP) {
+ set_ip_tos(skb, 8);
+ set_tcp_ip_src(skb, 0xA010101);
+ set_tcp_dest_port(skb, 5001);
+ }
+
+ return 0;
+}
+SEC("redirect_xmit")
+int _redirect_xmit(struct __sk_buff *skb)
+{
+ return bpf_redirect(skb->ifindex + 1, 0);
+}
+SEC("redirect_recv")
+int _redirect_recv(struct __sk_buff *skb)
+{
+ return bpf_redirect(skb->ifindex + 1, 1);
+}
+SEC("clone_redirect_xmit")
+int _clone_redirect_xmit(struct __sk_buff *skb)
+{
+ bpf_clone_redirect(skb, skb->ifindex + 1, 0);
+ return TC_ACT_SHOT;
+}
+SEC("clone_redirect_recv")
+int _clone_redirect_recv(struct __sk_buff *skb)
+{
+ bpf_clone_redirect(skb, skb->ifindex + 1, 1);
+ return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
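These sections are meant to be attached with tc's BPF classifier, in the same
style as the redirect script above; for example (eth0 is a placeholder device):

    tc qdisc add dev eth0 clsact
    tc filter add dev eth0 egress bpf da obj tcbpf1_kern.o sec classifier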
diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
new file mode 100644
index 000000000..822b0742b
--- /dev/null
+++ b/samples/bpf/tcp_basertt_kern.c
@@ -0,0 +1,71 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set base_rtt to 80us when host is running TCP-NV and
+ * both hosts are in the same datacenter (as determined by IPv6 prefix).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_basertt(struct bpf_sock_ops *skops)
+{
+ char cong[20];
+ char nv[] = "nv";
+ int rv = 0, n;
+ int op;
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Check if both hosts are in the same datacenter. For this
+ * example they are if the 1st 5.5 bytes in the IPv6 address
+ * are the same.
+ */
+ if (skops->family == AF_INET6 &&
+ skops->local_ip6[0] == skops->remote_ip6[0] &&
+ (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
+ (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) {
+ switch (op) {
+ case BPF_SOCK_OPS_BASE_RTT:
+ n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
+ cong, sizeof(cong));
+ if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
+ /* Set base_rtt to 80us */
+ rv = 80;
+ } else if (n) {
+ rv = n;
+ } else {
+ rv = -1;
+ }
+ break;
+ default:
+ rv = -1;
+ }
+ } else {
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
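The "1st 5.5 bytes" check deserves a worked example: local_ip6[0] covers
address bytes 0-3, and masking the byte-swapped second word with 0xfff00000
keeps its top 12 bits, i.e. byte 4 plus the high nibble of byte 5, for
4 + 1.5 = 5.5 bytes in total, a /44 prefix match. As a standalone userspace
sketch of the same condition:

    #include <stdint.h>
    #include <arpa/inet.h>

    /* returns 1 when a and b share the same /44 prefix, mirroring the
     * sockops check: word 0 covers bits 0..31, the 0xfff00000 mask on
     * the host-order word 1 covers bits 32..43 */
    static int same_dc(const uint32_t a[4], const uint32_t b[4])
    {
        return a[0] == b[0] &&
               (ntohl(a[1]) & 0xfff00000) == (ntohl(b[1]) & 0xfff00000);
    }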
diff --git a/samples/bpf/tcp_bpf.readme b/samples/bpf/tcp_bpf.readme
new file mode 100644
index 000000000..78e247f62
--- /dev/null
+++ b/samples/bpf/tcp_bpf.readme
@@ -0,0 +1,28 @@
+This file describes how to run the tcp_*_kern.o tcp_bpf (i.e. sock_ops)
+programs. These programs attach to a cgroupv2. The following commands create
+a cgroupv2 and attach a bash shell to the group.
+
+ mkdir -p /tmp/cgroupv2
+ mount -t cgroup2 none /tmp/cgroupv2
+ mkdir -p /tmp/cgroupv2/foo
+ bash
+ echo $$ >> /tmp/cgroupv2/foo/cgroup.procs
+
+Anything that runs under this shell belongs to the foo cgroupv2. To load
+(attach) one of the tcp_*_kern.o programs:
+
+ bpftool prog load tcp_basertt_kern.o /sys/fs/bpf/tcp_prog
+ bpftool cgroup attach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
+ bpftool prog tracelog
+
+"bpftool prog tracelog" will continue to run printing the BPF log buffer.
+The tcp_*_kern.o programs use special print functions to print logging
+information (if enabled by the ifdef).
+
+If using netperf/netserver to create traffic, you need to run them under the
+cgroupv2 to which the BPF programs are attached (i.e. under bash shell
+attached to the cgroupv2).
+
+To remove (detach) a sock_ops BPF program from a cgroupv2:
+
+ bpftool cgroup detach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
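If you want the sockops samples in this directory to act on the traffic,
remember that most of them only run for port 55601; a hedged netperf
invocation (assuming netperf's test-specific -P option to pin the data
connection ports) would be:

    netserver
    netperf -H <server> -- -P 55601,55601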
diff --git a/samples/bpf/tcp_bufs_kern.c b/samples/bpf/tcp_bufs_kern.c
new file mode 100644
index 000000000..6a80d0895
--- /dev/null
+++ b/samples/bpf/tcp_bufs_kern.c
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set initial receive window to 40 packets and send
+ * and receive buffers to 1.5MB. This would usually be done after
+ * doing appropriate checks that indicate the hosts are far enough
+ * away (i.e. large RTT).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_bufs(struct bpf_sock_ops *skops)
+{
+ int bufsize = 1500000;
+ int rwnd_init = 40;
+ int rv = 0;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 &&
+ skops->local_port != 55601) {
+ skops->reply = -1;
+ return 1;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+
+ /* Usually there would be a check to ensure the hosts are far
+ * enough apart that it makes sense to increase the buffer sizes
+ */
+ switch (op) {
+ case BPF_SOCK_OPS_RWND_INIT:
+ rv = rwnd_init;
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ /* Set sndbuf and rcvbuf of active connections */
+ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
+ sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
+ &bufsize, sizeof(bufsize));
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ /* Nothing to do */
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ /* Set sndbuf and rcvbuf of passive connections */
+ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
+ sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
+ &bufsize, sizeof(bufsize));
+ break;
+ default:
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_clamp_kern.c b/samples/bpf/tcp_clamp_kern.c
new file mode 100644
index 000000000..e88bd9ab0
--- /dev/null
+++ b/samples/bpf/tcp_clamp_kern.c
@@ -0,0 +1,97 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Sample BPF program to set send and receive buffers to 150KB, sndcwnd clamp
+ * to 100 packets and SYN and SYN_ACK RTOs to 10ms when both hosts are within
+ * the same datacenter. For his example, we assume they are within the same
+ * datacenter when the first 5.5 bytes of their IPv6 addresses are the same.
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_clamp(struct bpf_sock_ops *skops)
+{
+ int bufsize = 150000;
+ int to_init = 10;
+ int clamp = 100;
+ int rv = 0;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 && skops->local_port != 55601) {
+ skops->reply = -1;
+ return 0;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Check that both hosts are within same datacenter. For this example
+ * it is the case when the first 5.5 bytes of their IPv6 addresses are
+ * the same.
+ */
+ if (skops->family == AF_INET6 &&
+ skops->local_ip6[0] == skops->remote_ip6[0] &&
+ (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
+ (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) {
+ switch (op) {
+ case BPF_SOCK_OPS_TIMEOUT_INIT:
+ rv = to_init;
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ /* Set sndbuf and rcvbuf of active connections */
+ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
+ &bufsize, sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET,
+ SO_RCVBUF, &bufsize,
+ sizeof(bufsize));
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP,
+ TCP_BPF_SNDCWND_CLAMP,
+ &clamp, sizeof(clamp));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ /* Set sndbuf and rcvbuf of passive connections */
+ rv = bpf_setsockopt(skops, SOL_TCP,
+ TCP_BPF_SNDCWND_CLAMP,
+ &clamp, sizeof(clamp));
+ rv += bpf_setsockopt(skops, SOL_SOCKET,
+ SO_SNDBUF, &bufsize,
+ sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET,
+ SO_RCVBUF, &bufsize,
+ sizeof(bufsize));
+ break;
+ default:
+ rv = -1;
+ }
+ } else {
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_cong_kern.c b/samples/bpf/tcp_cong_kern.c
new file mode 100644
index 000000000..2311fc9dd
--- /dev/null
+++ b/samples/bpf/tcp_cong_kern.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set congestion control to dctcp when both hosts are
+ * in the same datacenter (as determined by IPv6 prefix).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_cong(struct bpf_sock_ops *skops)
+{
+ char cong[] = "dctcp";
+ int rv = 0;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 &&
+ skops->local_port != 55601) {
+ skops->reply = -1;
+ return 1;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Check if both hosts are in the same datacenter. For this
+ * example they are if the 1st 5.5 bytes in the IPv6 address
+ * are the same.
+ */
+ if (skops->family == AF_INET6 &&
+ skops->local_ip6[0] == skops->remote_ip6[0] &&
+ (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
+ (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) {
+ switch (op) {
+ case BPF_SOCK_OPS_NEEDS_ECN:
+ rv = 1;
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
+ cong, sizeof(cong));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
+ cong, sizeof(cong));
+ break;
+ default:
+ rv = -1;
+ }
+ } else {
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_dumpstats_kern.c b/samples/bpf/tcp_dumpstats_kern.c
new file mode 100644
index 000000000..e80d3afd2
--- /dev/null
+++ b/samples/bpf/tcp_dumpstats_kern.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Refer to samples/bpf/tcp_bpf.readme for the instructions on
+ * how to run this sample program.
+ */
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define INTERVAL 1000000000ULL
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __u32 type;
+ __u32 map_flags;
+ int *key;
+ __u64 *value;
+} bpf_next_dump SEC(".maps") = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
+SEC("sockops")
+int _sockops(struct bpf_sock_ops *ctx)
+{
+ struct bpf_tcp_sock *tcp_sk;
+ struct bpf_sock *sk;
+ __u64 *next_dump;
+ __u64 now;
+
+ switch (ctx->op) {
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG);
+ return 1;
+ case BPF_SOCK_OPS_RTT_CB:
+ break;
+ default:
+ return 1;
+ }
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ next_dump = bpf_sk_storage_get(&bpf_next_dump, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!next_dump)
+ return 1;
+
+ now = bpf_ktime_get_ns();
+ if (now < *next_dump)
+ return 1;
+
+ tcp_sk = bpf_tcp_sock(sk);
+ if (!tcp_sk)
+ return 1;
+
+ *next_dump = now + INTERVAL;
+
+ bpf_printk("dsack_dups=%u delivered=%u\n",
+ tcp_sk->dsack_dups, tcp_sk->delivered);
+ bpf_printk("delivered_ce=%u icsk_retransmits=%u\n",
+ tcp_sk->delivered_ce, tcp_sk->icsk_retransmits);
+
+ return 1;
+}
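The bpf_printk() output lands in the kernel trace buffer; besides
"bpftool prog tracelog" (see tcp_bpf.readme), it can be read directly,
assuming debugfs is mounted in the usual place:

    cat /sys/kernel/debug/tracing/trace_pipe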
diff --git a/samples/bpf/tcp_iw_kern.c b/samples/bpf/tcp_iw_kern.c
new file mode 100644
index 000000000..d14445573
--- /dev/null
+++ b/samples/bpf/tcp_iw_kern.c
@@ -0,0 +1,83 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set initial congestion window and initial receive
+ * window to 40 packets and send and receive buffers to 1.5MB. This
+ * would usually be done after doing appropriate checks that indicate
+ * the hosts are far enough away (i.e. large RTT).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_iw(struct bpf_sock_ops *skops)
+{
+ int bufsize = 1500000;
+ int rwnd_init = 40;
+ int iw = 40;
+ int rv = 0;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 &&
+ skops->local_port != 55601) {
+ skops->reply = -1;
+ return 1;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Usually there would be a check to ensure the hosts are far
+ * enough apart that it makes sense to increase the buffer sizes
+ */
+ switch (op) {
+ case BPF_SOCK_OPS_RWND_INIT:
+ rv = rwnd_init;
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ /* Set sndbuf and rcvbuf of active connections */
+ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
+ sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
+ &bufsize, sizeof(bufsize));
+ break;
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw,
+ sizeof(iw));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ /* Set sndbuf and rcvbuf of passive connections */
+ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize,
+ sizeof(bufsize));
+ rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
+ &bufsize, sizeof(bufsize));
+ break;
+ default:
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_rwnd_kern.c b/samples/bpf/tcp_rwnd_kern.c
new file mode 100644
index 000000000..223d9c23b
--- /dev/null
+++ b/samples/bpf/tcp_rwnd_kern.c
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set initial receive window to 40 packets when using IPv6
+ * and the first 5.5 bytes of the IPv6 addresses are not the same (in this
+ * example that means both hosts are not the same datacenter).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_rwnd(struct bpf_sock_ops *skops)
+{
+ int rv = -1;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 &&
+ skops->local_port != 55601) {
+ skops->reply = -1;
+ return 1;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Check for RWND_INIT operation and IPv6 addresses */
+ if (op == BPF_SOCK_OPS_RWND_INIT &&
+ skops->family == AF_INET6) {
+
+ /* If the first 5.5 bytes of the IPv6 address are not the same
+ * then both hosts are not in the same datacenter
+ * so use a larger initial advertised window (40 packets)
+ */
+ if (skops->local_ip6[0] != skops->remote_ip6[0] ||
+ (bpf_ntohl(skops->local_ip6[1]) & 0xfffff000) !=
+ (bpf_ntohl(skops->remote_ip6[1]) & 0xfffff000))
+ rv = 40;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_synrto_kern.c b/samples/bpf/tcp_synrto_kern.c
new file mode 100644
index 000000000..d58004eef
--- /dev/null
+++ b/samples/bpf/tcp_synrto_kern.c
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * BPF program to set SYN and SYN-ACK RTOs to 10ms when using IPv6 addresses
+ * and the first 5.5 bytes of the IPv6 addresses are the same (in this example
+ * that means both hosts are in the same datacenter).
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_synrto(struct bpf_sock_ops *skops)
+{
+ int rv = -1;
+ int op;
+
+ /* For testing purposes, only execute rest of BPF program
+ * if neither port number is 55601
+ */
+ if (bpf_ntohl(skops->remote_port) != 55601 &&
+ skops->local_port != 55601) {
+ skops->reply = -1;
+ return 1;
+ }
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+
+ /* Check for TIMEOUT_INIT operation and IPv6 addresses */
+ if (op == BPF_SOCK_OPS_TIMEOUT_INIT &&
+ skops->family == AF_INET6) {
+
+ /* If the first 5.5 bytes of the IPv6 address are the same
+ * then both hosts are in the same datacenter
+ * so use an RTO of 10ms
+ */
+ if (skops->local_ip6[0] == skops->remote_ip6[0] &&
+ (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
+ (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000))
+ rv = 10;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tcp_tos_reflect_kern.c b/samples/bpf/tcp_tos_reflect_kern.c
new file mode 100644
index 000000000..953fedc79
--- /dev/null
+++ b/samples/bpf/tcp_tos_reflect_kern.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Facebook
+ *
+ * BPF program to automatically reflect the TOS value from the received SYN packet
+ *
+ * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <linux/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define DEBUG 1
+
+SEC("sockops")
+int bpf_basertt(struct bpf_sock_ops *skops)
+{
+ char header[sizeof(struct ipv6hdr)];
+ struct ipv6hdr *hdr6;
+ struct iphdr *hdr;
+ int hdr_size = 0;
+ int save_syn = 1;
+ int tos = 0;
+ int rv = 0;
+ int op;
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+ switch (op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
+ &save_syn, sizeof(save_syn));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ if (skops->family == AF_INET)
+ hdr_size = sizeof(struct iphdr);
+ else
+ hdr_size = sizeof(struct ipv6hdr);
+ rv = bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN,
+ header, hdr_size);
+ if (!rv) {
+ if (skops->family == AF_INET) {
+ hdr = (struct iphdr *) header;
+ tos = hdr->tos;
+ if (tos != 0)
+ bpf_setsockopt(skops, SOL_IP, IP_TOS,
+ &tos, sizeof(tos));
+ } else {
+ hdr6 = (struct ipv6hdr *) header;
+ tos = ((hdr6->priority) << 4 |
+ (hdr6->flow_lbl[0]) >> 4);
+ if (tos)
+ bpf_setsockopt(skops, SOL_IPV6,
+ IPV6_TCLASS,
+ &tos, sizeof(tos));
+ }
+ rv = 0;
+ }
+ break;
+ default:
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
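The IPv6 branch has to reassemble the 8-bit traffic class, which struct
ipv6hdr splits across two fields: 'priority' holds the high nibble and the top
of flow_lbl[0] holds the low nibble. A worked sketch of the expression:

    #include <linux/ipv6.h>

    /* reassemble the traffic class from struct ipv6hdr;
     * e.g. tclass 0xb8: priority = 0xb, flow_lbl[0] >> 4 = 0x8,
     * so (0xb << 4) | 0x8 gives back 0xb8 */
    static int ipv6_tclass(const struct ipv6hdr *hdr6)
    {
        return (hdr6->priority << 4) | (hdr6->flow_lbl[0] >> 4);
    }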
diff --git a/samples/bpf/test_cgrp2_array_pin.c b/samples/bpf/test_cgrp2_array_pin.c
new file mode 100644
index 000000000..05e88aa63
--- /dev/null
+++ b/samples/bpf/test_cgrp2_array_pin.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#include <linux/unistd.h>
+#include <linux/bpf.h>
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <bpf/bpf.h>
+
+static void usage(void)
+{
+ printf("Usage: test_cgrp2_array_pin [...]\n");
+ printf(" -F <file> File to pin an BPF cgroup array\n");
+ printf(" -U <file> Update an already pinned BPF cgroup array\n");
+ printf(" -v <value> Full path of the cgroup2\n");
+ printf(" -h Display this help\n");
+}
+
+int main(int argc, char **argv)
+{
+ const char *pinned_file = NULL, *cg2 = NULL;
+ int create_array = 1;
+ int array_key = 0;
+ int array_fd = -1;
+ int cg2_fd = -1;
+ int ret = -1;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "F:U:v:")) != -1) {
+ switch (opt) {
+ /* General args */
+ case 'F':
+ pinned_file = optarg;
+ break;
+ case 'U':
+ pinned_file = optarg;
+ create_array = 0;
+ break;
+ case 'v':
+ cg2 = optarg;
+ break;
+ default:
+ usage();
+ goto out;
+ }
+ }
+
+ if (!cg2 || !pinned_file) {
+ usage();
+ goto out;
+ }
+
+ cg2_fd = open(cg2, O_RDONLY);
+ if (cg2_fd < 0) {
+ fprintf(stderr, "open(%s,...): %s(%d)\n",
+ cg2, strerror(errno), errno);
+ goto out;
+ }
+
+ if (create_array) {
+ array_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_ARRAY, NULL,
+ sizeof(uint32_t), sizeof(uint32_t),
+ 1, NULL);
+ if (array_fd < 0) {
+ fprintf(stderr,
+ "bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,...): %s(%d)\n",
+ strerror(errno), errno);
+ goto out;
+ }
+ } else {
+ array_fd = bpf_obj_get(pinned_file);
+ if (array_fd < 0) {
+ fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
+ pinned_file, strerror(errno), errno);
+ goto out;
+ }
+ }
+
+ ret = bpf_map_update_elem(array_fd, &array_key, &cg2_fd, 0);
+ if (ret) {
+ perror("bpf_map_update_elem");
+ goto out;
+ }
+
+ if (create_array) {
+ ret = bpf_obj_pin(array_fd, pinned_file);
+ if (ret) {
+ fprintf(stderr, "bpf_obj_pin(..., %s): %s(%d)\n",
+ pinned_file, strerror(errno), errno);
+ goto out;
+ }
+ }
+
+out:
+ if (array_fd != -1)
+ close(array_fd);
+ if (cg2_fd != -1)
+ close(cg2_fd);
+ return ret;
+}
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
new file mode 100644
index 000000000..68ce69457
--- /dev/null
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -0,0 +1,177 @@
+/* eBPF example program:
+ *
+ * - Creates arraymap in kernel with 4-byte keys and 8-byte values
+ *
+ * - Loads eBPF program
+ *
+ * The eBPF program accesses the map passed in to store two pieces of
+ * information: the number of invocations of the program (which maps
+ * to the number of packets received) is stored at key 0, and the value
+ * at key 1 is incremented on each invocation by the number of bytes in
+ * the skb.
+ *
+ * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
+ *
+ * - Every second, reads map[0] and map[1] to see how many bytes and
+ * packets were seen on any socket of tasks in the given cgroup.
+ */
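+
+/* Example invocation (the cgroup path is illustrative):
+ *
+ *   ./test_cgrp2_attach /mnt/cgroup2/foo egress
+ *
+ * Pass -d to attach a drop verdict instead, or -D to detach and exit.
+ */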
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+
+#include "bpf_insn.h"
+#include "bpf_util.h"
+
+enum {
+ MAP_KEY_PACKETS,
+ MAP_KEY_BYTES,
+};
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int prog_load(int map_fd, int verdict)
+{
+ struct bpf_insn prog[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */
+
+ /* Count packets */
+ BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
+
+ /* Count bytes */
+ BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+ BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
+
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
+
+ BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = ARRAY_SIZE(prog);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = bpf_log_buf,
+ .log_size = BPF_LOG_BUF_SIZE,
+ );
+
+ return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB, NULL, "GPL",
+ prog, insns_cnt, &opts);
+}
+
+static int usage(const char *argv0)
+{
+ printf("Usage: %s [-d] [-D] <cg-path> <egress|ingress>\n", argv0);
+ printf(" -d Drop Traffic\n");
+ printf(" -D Detach filter, and exit\n");
+ return EXIT_FAILURE;
+}
+
+static int attach_filter(int cg_fd, int type, int verdict)
+{
+ int prog_fd, map_fd, ret, key;
+ long long pkt_cnt, byte_cnt;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL,
+ sizeof(key), sizeof(byte_cnt),
+ 256, NULL);
+ if (map_fd < 0) {
+ printf("Failed to create map: '%s'\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ prog_fd = prog_load(map_fd, verdict);
+ printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf);
+
+ if (prog_fd < 0) {
+ printf("Failed to load prog: '%s'\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
+ if (ret < 0) {
+ printf("Failed to attach prog to cgroup: '%s'\n",
+ strerror(errno));
+ return EXIT_FAILURE;
+ }
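+ /* Poll the counters once per second; runs until interrupted. */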
+ while (1) {
+ key = MAP_KEY_PACKETS;
+ assert(bpf_map_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
+
+ key = MAP_KEY_BYTES;
+ assert(bpf_map_lookup_elem(map_fd, &key, &byte_cnt) == 0);
+
+ printf("cgroup received %lld packets, %lld bytes\n",
+ pkt_cnt, byte_cnt);
+ sleep(1);
+ }
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+ int detach_only = 0, verdict = 1;
+ enum bpf_attach_type type;
+ int opt, cg_fd, ret;
+
+ while ((opt = getopt(argc, argv, "Dd")) != -1) {
+ switch (opt) {
+ case 'd':
+ verdict = 0;
+ break;
+ case 'D':
+ detach_only = 1;
+ break;
+ default:
+ return usage(argv[0]);
+ }
+ }
+
+ if (argc - optind < 2)
+ return usage(argv[0]);
+
+ if (strcmp(argv[optind + 1], "ingress") == 0)
+ type = BPF_CGROUP_INET_INGRESS;
+ else if (strcmp(argv[optind + 1], "egress") == 0)
+ type = BPF_CGROUP_INET_EGRESS;
+ else
+ return usage(argv[0]);
+
+ cg_fd = open(argv[optind], O_DIRECTORY | O_RDONLY);
+ if (cg_fd < 0) {
+ printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ if (detach_only) {
+ ret = bpf_prog_detach(cg_fd, type);
+ printf("bpf_prog_detach() returned '%s' (%d)\n",
+ strerror(errno), errno);
+ } else
+ ret = attach_filter(cg_fd, type, verdict);
+
+ return ret;
+}
diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c
new file mode 100644
index 000000000..a0811df88
--- /dev/null
+++ b/samples/bpf/test_cgrp2_sock.c
@@ -0,0 +1,294 @@
+/* eBPF example program:
+ *
+ * - Loads eBPF program
+ *
+ * The eBPF program sets the sk_bound_dev_if index in new AF_INET{6}
+ * sockets opened by processes in the cgroup.
+ *
+ * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+
+#include "bpf_insn.h"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+static int prog_load(__u32 idx, __u32 mark, __u32 prio)
+{
+ /* save pointer to context */
+ struct bpf_insn prog_start[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ };
+ struct bpf_insn prog_end[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+ BPF_EXIT_INSN(),
+ };
+
+ /* set sk_bound_dev_if on socket */
+ struct bpf_insn prog_dev[] = {
+ BPF_MOV64_IMM(BPF_REG_3, idx),
+ BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, bound_dev_if)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, bound_dev_if)),
+ };
+
+ /* set mark on socket */
+ struct bpf_insn prog_mark[] = {
+ /* get uid of process */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_current_uid_gid),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+ /* if uid is 0, use given mark, else use the uid as the mark */
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, mark),
+
+ /* set the mark on the new socket */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, mark)),
+ };
+
+ /* set priority on socket */
+ struct bpf_insn prog_prio[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_3, prio),
+ BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, priority)),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = bpf_log_buf,
+ .log_size = BPF_LOG_BUF_SIZE,
+ );
+
+ struct bpf_insn *prog;
+ size_t insns_cnt;
+ void *p;
+ int ret;
+
+ insns_cnt = sizeof(prog_start) + sizeof(prog_end);
+ if (idx)
+ insns_cnt += sizeof(prog_dev);
+
+ if (mark)
+ insns_cnt += sizeof(prog_mark);
+
+ if (prio)
+ insns_cnt += sizeof(prog_prio);
+
+ p = prog = malloc(insns_cnt);
+ if (!prog) {
+ fprintf(stderr, "Failed to allocate memory for instructions\n");
+ return EXIT_FAILURE;
+ }
+
+ memcpy(p, prog_start, sizeof(prog_start));
+ p += sizeof(prog_start);
+
+ if (idx) {
+ memcpy(p, prog_dev, sizeof(prog_dev));
+ p += sizeof(prog_dev);
+ }
+
+ if (mark) {
+ memcpy(p, prog_mark, sizeof(prog_mark));
+ p += sizeof(prog_mark);
+ }
+
+ if (prio) {
+ memcpy(p, prog_prio, sizeof(prog_prio));
+ p += sizeof(prog_prio);
+ }
+
+ memcpy(p, prog_end, sizeof(prog_end));
+ p += sizeof(prog_end);
+
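+ /* insns_cnt was accumulated in bytes above; convert it to a count
+ * of struct bpf_insn before passing it to bpf_prog_load().
+ */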
+ insns_cnt /= sizeof(struct bpf_insn);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL",
+ prog, insns_cnt, &opts);
+
+ free(prog);
+
+ return ret;
+}
+
+static int get_bind_to_device(int sd, char *name, size_t len)
+{
+ socklen_t optlen = len;
+ int rc;
+
+ name[0] = '\0';
+ rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen);
+ if (rc < 0)
+ perror("setsockopt(SO_BINDTODEVICE)");
+
+ return rc;
+}
+
+static unsigned int get_somark(int sd)
+{
+ unsigned int mark = 0;
+ socklen_t optlen = sizeof(mark);
+ int rc;
+
+ rc = getsockopt(sd, SOL_SOCKET, SO_MARK, &mark, &optlen);
+ if (rc < 0)
+ perror("getsockopt(SO_MARK)");
+
+ return mark;
+}
+
+static unsigned int get_priority(int sd)
+{
+ unsigned int prio = 0;
+ socklen_t optlen = sizeof(prio);
+ int rc;
+
+ rc = getsockopt(sd, SOL_SOCKET, SO_PRIORITY, &prio, &optlen);
+ if (rc < 0)
+ perror("getsockopt(SO_PRIORITY)");
+
+ return prio;
+}
+
+static int show_sockopts(int family)
+{
+ unsigned int mark, prio;
+ char name[16];
+ int sd;
+
+ sd = socket(family, SOCK_DGRAM, 17); /* protocol 17 is IPPROTO_UDP */
+ if (sd < 0) {
+ perror("socket");
+ return 1;
+ }
+
+ if (get_bind_to_device(sd, name, sizeof(name)) < 0)
+ return 1;
+
+ mark = get_somark(sd);
+ prio = get_priority(sd);
+
+ close(sd);
+
+ printf("sd %d: dev %s, mark %u, priority %u\n", sd, name, mark, prio);
+
+ return 0;
+}
+
+static int usage(const char *argv0)
+{
+ printf("Usage:\n");
+ printf(" Attach a program\n");
+ printf(" %s -b bind-to-dev -m mark -p prio cg-path\n", argv0);
+ printf("\n");
+ printf(" Detach a program\n");
+ printf(" %s -d cg-path\n", argv0);
+ printf("\n");
+ printf(" Show inherited socket settings (mark, priority, and device)\n");
+ printf(" %s [-6]\n", argv0);
+ return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+ __u32 idx = 0, mark = 0, prio = 0;
+ const char *cgrp_path = NULL;
+ int cg_fd, prog_fd, ret;
+ int family = PF_INET;
+ int do_attach = 1;
+ int rc;
+
+ while ((rc = getopt(argc, argv, "db:m:p:6")) != -1) {
+ switch (rc) {
+ case 'd':
+ do_attach = 0;
+ break;
+ case 'b':
+ idx = if_nametoindex(optarg);
+ if (!idx) {
+ idx = strtoumax(optarg, NULL, 0);
+ if (!idx) {
+ printf("Invalid device name\n");
+ return EXIT_FAILURE;
+ }
+ }
+ break;
+ case 'm':
+ mark = strtoumax(optarg, NULL, 0);
+ break;
+ case 'p':
+ prio = strtoumax(optarg, NULL, 0);
+ break;
+ case '6':
+ family = PF_INET6;
+ break;
+ default:
+ return usage(argv[0]);
+ }
+ }
+
+ if (optind == argc)
+ return show_sockopts(family);
+
+ cgrp_path = argv[optind];
+ if (!cgrp_path) {
+ fprintf(stderr, "cgroup path not given\n");
+ return EXIT_FAILURE;
+ }
+
+ if (do_attach && !idx && !mark && !prio) {
+ fprintf(stderr,
+ "One of device, mark or priority must be given\n");
+ return EXIT_FAILURE;
+ }
+
+ cg_fd = open(cgrp_path, O_DIRECTORY | O_RDONLY);
+ if (cg_fd < 0) {
+ printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+ return EXIT_FAILURE;
+ }
+
+ if (do_attach) {
+ prog_fd = prog_load(idx, mark, prio);
+ if (prog_fd < 0) {
+ printf("Failed to load prog: '%s'\n", strerror(errno));
+ printf("Output from kernel verifier:\n%s\n-------\n",
+ bpf_log_buf);
+ return EXIT_FAILURE;
+ }
+
+ ret = bpf_prog_attach(prog_fd, cg_fd,
+ BPF_CGROUP_INET_SOCK_CREATE, 0);
+ if (ret < 0) {
+ printf("Failed to attach prog to cgroup: '%s'\n",
+ strerror(errno));
+ return EXIT_FAILURE;
+ }
+ } else {
+ ret = bpf_prog_detach(cg_fd, BPF_CGROUP_INET_SOCK_CREATE);
+ if (ret < 0) {
+ printf("Failed to detach prog from cgroup: '%s'\n",
+ strerror(errno));
+ return EXIT_FAILURE;
+ }
+ }
+
+ close(cg_fd);
+ return EXIT_SUCCESS;
+}
diff --git a/samples/bpf/test_cgrp2_sock.sh b/samples/bpf/test_cgrp2_sock.sh
new file mode 100755
index 000000000..9f6174236
--- /dev/null
+++ b/samples/bpf/test_cgrp2_sock.sh
@@ -0,0 +1,135 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Test various socket options that can be set by attaching programs to cgroups.
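+# Assumes it runs as root (it mounts cgroup2 and attaches programs) and
+# that the test_cgrp2_sock binary built from this directory is on $PATH.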
+
+CGRP_MNT="/tmp/cgroupv2-test_cgrp2_sock"
+
+################################################################################
+#
+print_result()
+{
+ local rc=$1
+ local status=" OK "
+
+ [ $rc -ne 0 ] && status="FAIL"
+
+ printf "%-50s [%4s]\n" "$2" "$status"
+}
+
+check_sock()
+{
+ out=$(test_cgrp2_sock)
+ echo $out | grep -q "$1"
+ if [ $? -ne 0 ]; then
+ print_result 1 "IPv4: $2"
+ echo " expected: $1"
+ echo " have: $out"
+ rc=1
+ else
+ print_result 0 "IPv4: $2"
+ fi
+}
+
+check_sock6()
+{
+ out=$(test_cgrp2_sock -6)
+ echo $out | grep -q "$1"
+ if [ $? -ne 0 ]; then
+ print_result 1 "IPv6: $2"
+ echo " expected: $1"
+ echo " have: $out"
+ rc=1
+ else
+ print_result 0 "IPv6: $2"
+ fi
+}
+
+################################################################################
+#
+
+cleanup()
+{
+ echo $$ >> ${CGRP_MNT}/cgroup.procs
+ rmdir ${CGRP_MNT}/sockopts
+}
+
+cleanup_and_exit()
+{
+ local rc=$1
+ local msg="$2"
+
+ [ -n "$msg" ] && echo "ERROR: $msg"
+
+ test_cgrp2_sock -d ${CGRP_MNT}/sockopts
+ ip li del cgrp2_sock
+ umount ${CGRP_MNT}
+
+ exit $rc
+}
+
+
+################################################################################
+# main
+
+rc=0
+
+ip li add cgrp2_sock type dummy 2>/dev/null
+
+set -e
+mkdir -p ${CGRP_MNT}
+mount -t cgroup2 none ${CGRP_MNT}
+set +e
+
+
+# make sure we have a known start point
+cleanup 2>/dev/null
+
+mkdir -p ${CGRP_MNT}/sockopts
+[ $? -ne 0 ] && cleanup_and_exit 1 "Failed to create cgroup hierarchy"
+
+
+# set pid into cgroup
+echo $$ > ${CGRP_MNT}/sockopts/cgroup.procs
+
+# no bpf program attached, so socket should show no settings
+check_sock "dev , mark 0, priority 0" "No programs attached"
+check_sock6 "dev , mark 0, priority 0" "No programs attached"
+
+# verify device is set
+#
+test_cgrp2_sock -b cgrp2_sock ${CGRP_MNT}/sockopts
+if [ $? -ne 0 ]; then
+ cleanup_and_exit 1 "Failed to install program to set device"
+fi
+check_sock "dev cgrp2_sock, mark 0, priority 0" "Device set"
+check_sock6 "dev cgrp2_sock, mark 0, priority 0" "Device set"
+
+# verify mark is set
+#
+test_cgrp2_sock -m 666 ${CGRP_MNT}/sockopts
+if [ $? -ne 0 ]; then
+ cleanup_and_exit 1 "Failed to install program to set mark"
+fi
+check_sock "dev , mark 666, priority 0" "Mark set"
+check_sock6 "dev , mark 666, priority 0" "Mark set"
+
+# verify priority is set
+#
+test_cgrp2_sock -p 123 ${CGRP_MNT}/sockopts
+if [ $? -ne 0 ]; then
+ cleanup_and_exit 1 "Failed to install program to set priority"
+fi
+check_sock "dev , mark 0, priority 123" "Priority set"
+check_sock6 "dev , mark 0, priority 123" "Priority set"
+
+# all 3 at once
+#
+test_cgrp2_sock -b cgrp2_sock -m 666 -p 123 ${CGRP_MNT}/sockopts
+if [ $? -ne 0 ]; then
+ cleanup_and_exit 1 "Failed to install program to set device, mark and priority"
+fi
+check_sock "dev cgrp2_sock, mark 666, priority 123" "Priority set"
+check_sock6 "dev cgrp2_sock, mark 666, priority 123" "Priority set"
+
+cleanup_and_exit $rc
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
new file mode 100644
index 000000000..e7060aaa2
--- /dev/null
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* eBPF example program:
+ *
+ * - Loads eBPF program
+ *
+ * The eBPF program loads a filter from file and attaches the
+ * program to a cgroup using BPF_PROG_ATTACH
+ */
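+/*
+ * Example (matches the invocation in test_cgrp2_sock2.sh):
+ *
+ *   ./test_cgrp2_sock2 /tmp/cgroupv2/foo sock_flags_kern.o 0
+ */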
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_insn.h"
+
+static int usage(const char *argv0)
+{
+ printf("Usage: %s cg-path filter-path [filter-id]\n", argv0);
+ return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+ int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0;
+ const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2";
+ struct bpf_link *link = NULL;
+ struct bpf_program *progs[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+
+ if (argc < 3)
+ return usage(argv[0]);
+
+ if (argc > 3)
+ filter_id = atoi(argv[3]);
+
+ cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
+ if (cg_fd < 0) {
+ printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+ return ret;
+ }
+
+ obj = bpf_object__open_file(argv[2], NULL);
+ if (libbpf_get_error(obj)) {
+ printf("ERROR: opening BPF object file failed\n");
+ return ret;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ progs[prog_cnt] = prog;
+ prog_cnt++;
+ }
+
+ if (filter_id >= prog_cnt) {
+ printf("Invalid program id; program not found in file\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_cgroup(progs[filter_id], cg_fd);
+ if (libbpf_get_error(link)) {
+ printf("ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ err = bpf_link__pin(link, link_pin_path);
+ if (err < 0) {
+ printf("ERROR: bpf_link__pin failed: %d\n", err);
+ goto cleanup;
+ }
+
+ ret = EXIT_SUCCESS;
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return ret;
+}
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
new file mode 100755
index 000000000..6a3dbe642
--- /dev/null
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+BPFFS=/sys/fs/bpf
+LINK_PIN=$BPFFS/test_cgrp2_sock2
+
+function config_device {
+ ip netns add at_ns0
+ ip link add veth0 type veth peer name veth0b
+ ip link set veth0b up
+ ip link set veth0 netns at_ns0
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip addr add 172.16.1.101/24 dev veth0b
+ ip addr add 2401:db00::2/64 dev veth0b nodad
+}
+
+function config_cgroup {
+ rm -rf /tmp/cgroupv2
+ mkdir -p /tmp/cgroupv2
+ mount -t cgroup2 none /tmp/cgroupv2
+ mkdir -p /tmp/cgroupv2/foo
+ echo $$ >> /tmp/cgroupv2/foo/cgroup.procs
+}
+
+function config_bpffs {
+ if mount | grep $BPFFS > /dev/null; then
+ echo "bpffs already mounted"
+ else
+ echo "bpffs not mounted. Mounting..."
+ mount -t bpf none $BPFFS
+ fi
+}
+
+function attach_bpf {
+ ./test_cgrp2_sock2 /tmp/cgroupv2/foo sock_flags_kern.o $1
+ [ $? -ne 0 ] && exit 1
+}
+
+function cleanup {
+ rm -rf $LINK_PIN
+ ip link del veth0b
+ ip netns delete at_ns0
+ umount /tmp/cgroupv2
+ rm -rf /tmp/cgroupv2
+}
+
+cleanup 2>/dev/null
+
+set -e
+config_device
+config_cgroup
+config_bpffs
+set +e
+
+#
+# Test 1 - fail ping6
+#
+attach_bpf 0
+ping -c1 -w1 172.16.1.100
+if [ $? -ne 0 ]; then
+ echo "ping failed when it should succeed"
+ cleanup
+ exit 1
+fi
+
+ping6 -c1 -w1 2401:db00::1
+if [ $? -eq 0 ]; then
+ echo "ping6 succeeded when it should not"
+ cleanup
+ exit 1
+fi
+
+rm -rf $LINK_PIN
+sleep 1 # Wait for link detach
+
+#
+# Test 2 - fail ping
+#
+attach_bpf 1
+ping6 -c1 -w1 2401:db00::1
+if [ $? -ne 0 ]; then
+ echo "ping6 failed when it should succeed"
+ cleanup
+ exit 1
+fi
+
+ping -c1 -w1 172.16.1.100
+if [ $? -eq 0 ]; then
+ echo "ping succeeded when it should not"
+ cleanup
+ exit 1
+fi
+
+cleanup
+echo
+echo "*** PASS ***"
diff --git a/samples/bpf/test_cgrp2_tc.sh b/samples/bpf/test_cgrp2_tc.sh
new file mode 100755
index 000000000..12faf5847
--- /dev/null
+++ b/samples/bpf/test_cgrp2_tc.sh
@@ -0,0 +1,185 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MY_DIR=$(dirname $0)
+# Details on the bpf prog
+BPF_CGRP2_ARRAY_NAME='test_cgrp2_array_pin'
+BPF_PROG="$MY_DIR/test_cgrp2_tc_kern.o"
+BPF_SECTION='filter'
+
+[ -z "$TC" ] && TC='tc'
+[ -z "$IP" ] && IP='ip'
+
+# Names of the veth interface, net namespace...etc.
+HOST_IFC='ve'
+NS_IFC='vens'
+NS='ns'
+
+find_mnt() {
+ cat /proc/mounts | \
+ awk '{ if ($3 == "'$1'" && mnt == "") { mnt = $2 }} END { print mnt }'
+}
+
+# Init cgroup2 vars
+init_cgrp2_vars() {
+ CGRP2_ROOT=$(find_mnt cgroup2)
+ if [ -z "$CGRP2_ROOT" ]
+ then
+ CGRP2_ROOT='/mnt/cgroup2'
+ MOUNT_CGRP2="yes"
+ fi
+ CGRP2_TC="$CGRP2_ROOT/tc"
+ CGRP2_TC_LEAF="$CGRP2_TC/leaf"
+}
+
+# Init bpf fs vars
+init_bpf_fs_vars() {
+ local bpf_fs_root=$(find_mnt bpf)
+ [ -n "$bpf_fs_root" ] || return -1
+ BPF_FS_TC_SHARE="$bpf_fs_root/tc/globals"
+}
+
+setup_cgrp2() {
+ case $1 in
+ start)
+ if [ "$MOUNT_CGRP2" == 'yes' ]
+ then
+ [ -d $CGRP2_ROOT ] || mkdir -p $CGRP2_ROOT
+ mount -t cgroup2 none $CGRP2_ROOT || return $?
+ fi
+ mkdir -p $CGRP2_TC_LEAF
+ ;;
+ *)
+ rmdir $CGRP2_TC_LEAF && rmdir $CGRP2_TC
+ [ "$MOUNT_CGRP2" == 'yes' ] && umount $CGRP2_ROOT
+ ;;
+ esac
+}
+
+setup_bpf_cgrp2_array() {
+ local bpf_cgrp2_array="$BPF_FS_TC_SHARE/$BPF_CGRP2_ARRAY_NAME"
+ case $1 in
+ start)
+ $MY_DIR/test_cgrp2_array_pin -U $bpf_cgrp2_array -v $CGRP2_TC
+ ;;
+ *)
+ [ -d "$BPF_FS_TC_SHARE" ] && rm -f $bpf_cgrp2_array
+ ;;
+ esac
+}
+
+setup_net() {
+ case $1 in
+ start)
+ $IP link add $HOST_IFC type veth peer name $NS_IFC || return $?
+ $IP link set dev $HOST_IFC up || return $?
+ sysctl -q net.ipv6.conf.$HOST_IFC.accept_dad=0
+
+ $IP netns add ns || return $?
+ $IP link set dev $NS_IFC netns ns || return $?
+ $IP -n $NS link set dev $NS_IFC up || return $?
+ $IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.accept_dad=0
+ $TC qdisc add dev $HOST_IFC clsact || return $?
+ $TC filter add dev $HOST_IFC egress bpf da obj $BPF_PROG sec $BPF_SECTION || return $?
+ ;;
+ *)
+ $IP netns del $NS
+ $IP link del $HOST_IFC
+ ;;
+ esac
+}
+
+run_in_cgrp() {
+ # Fork another bash and move it under the specified cgroup.
+ # It makes the cgroup cleanup easier at the end of the test.
+ cmd='echo $$ > '
+ cmd="$cmd $1/cgroup.procs; exec $2"
+ bash -c "$cmd"
+}
+
+do_test() {
+ run_in_cgrp $CGRP2_TC_LEAF "ping -6 -c3 ff02::1%$HOST_IFC >& /dev/null"
+ local dropped=$($TC -s qdisc show dev $HOST_IFC | tail -3 | \
+ awk '/drop/{print substr($7, 0, index($7, ",")-1)}')
+ if [[ $dropped -eq 0 ]]
+ then
+ echo "FAIL"
+ return 1
+ else
+ echo "Successfully filtered $dropped packets"
+ return 0
+ fi
+}
+
+do_exit() {
+ if [ "$DEBUG" == "yes" ] && [ "$MODE" != 'cleanuponly' ]
+ then
+ echo "------ DEBUG ------"
+ echo "mount: "; mount | egrep '(cgroup2|bpf)'; echo
+ echo "$CGRP2_TC_LEAF: "; ls -l $CGRP2_TC_LEAF; echo
+ if [ -d "$BPF_FS_TC_SHARE" ]
+ then
+ echo "$BPF_FS_TC_SHARE: "; ls -l $BPF_FS_TC_SHARE; echo
+ fi
+ echo "Host net:"
+ $IP netns
+ $IP link show dev $HOST_IFC
+ $IP -6 a show dev $HOST_IFC
+ $TC -s qdisc show dev $HOST_IFC
+ echo
+ echo "$NS net:"
+ $IP -n $NS link show dev $NS_IFC
+ $IP -n $NS -6 link show dev $NS_IFC
+ echo "------ DEBUG ------"
+ echo
+ fi
+
+ if [ "$MODE" != 'nocleanup' ]
+ then
+ setup_net stop
+ setup_bpf_cgrp2_array stop
+ setup_cgrp2 stop
+ fi
+}
+
+init_cgrp2_vars
+init_bpf_fs_vars
+
+while [[ $# -ge 1 ]]
+do
+ a="$1"
+ case $a in
+ debug)
+ DEBUG='yes'
+ shift 1
+ ;;
+ cleanup-only)
+ MODE='cleanuponly'
+ shift 1
+ ;;
+ no-cleanup)
+ MODE='nocleanup'
+ shift 1
+ ;;
+ *)
+ echo "test_cgrp2_tc [debug] [cleanup-only | no-cleanup]"
+ echo " debug: Print cgrp and network setup details at the end of the test"
+ echo " cleanup-only: Try to cleanup things from last test. No test will be run"
+ echo " no-cleanup: Run the test but don't do cleanup at the end"
+ echo "[Note: If no arg is given, it will run the test and do cleanup at the end]"
+ echo
+ exit -1
+ ;;
+ esac
+done
+
+trap do_exit 0
+
+[ "$MODE" == 'cleanuponly' ] && exit
+
+setup_cgrp2 start || exit $?
+setup_net start || exit $?
+init_bpf_fs_vars || exit $?
+setup_bpf_cgrp2_array start || exit $?
+do_test
+echo
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
new file mode 100644
index 000000000..4dd532a31
--- /dev/null
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -0,0 +1,70 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_proto;
+};
+
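+/* Map definition in the iproute2/tc ELF format. PIN_GLOBAL_NS asks tc to
+ * pin the map in its global bpf-fs namespace (tc/globals), which is where
+ * test_cgrp2_tc.sh expects to find and update it.
+ */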
+#define PIN_GLOBAL_NS 2
+struct bpf_elf_map {
+ __u32 type;
+ __u32 size_key;
+ __u32 size_value;
+ __u32 max_elem;
+ __u32 flags;
+ __u32 id;
+ __u32 pinning;
+};
+
+struct bpf_elf_map SEC("maps") test_cgrp2_array_pin = {
+ .type = BPF_MAP_TYPE_CGROUP_ARRAY,
+ .size_key = sizeof(uint32_t),
+ .size_value = sizeof(uint32_t),
+ .pinning = PIN_GLOBAL_NS,
+ .max_elem = 1,
+};
+
+SEC("filter")
+int handle_egress(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ struct eth_hdr *eth = data;
+ struct ipv6hdr *ip6h = data + sizeof(*eth);
+ void *data_end = (void *)(long)skb->data_end;
+ char dont_care_msg[] = "dont care %04x %d\n";
+ char pass_msg[] = "pass\n";
+ char reject_msg[] = "reject\n";
+
+ /* single length check */
+ if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
+ return TC_ACT_OK;
+
+ if (eth->h_proto != htons(ETH_P_IPV6) ||
+ ip6h->nexthdr != IPPROTO_ICMPV6) {
+ bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
+ eth->h_proto, ip6h->nexthdr);
+ return TC_ACT_OK;
+ } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
+ bpf_trace_printk(pass_msg, sizeof(pass_msg));
+ return TC_ACT_OK;
+ } else {
+ bpf_trace_printk(reject_msg, sizeof(reject_msg));
+ return TC_ACT_SHOT;
+ }
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_cls_bpf.sh b/samples/bpf/test_cls_bpf.sh
new file mode 100755
index 000000000..aaddd67b3
--- /dev/null
+++ b/samples/bpf/test_cls_bpf.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+function pktgen {
+ ../pktgen/pktgen_bench_xmit_mode_netif_receive.sh -i $IFC -s 64 \
+ -m 90:e2:ba:ff:ff:ff -d 192.168.0.1 -t 4
+ local dropped=`tc -s qdisc show dev $IFC | tail -3 | awk '/drop/{print $7}'`
+ if [ "$dropped" == "0," ]; then
+ echo "FAIL"
+ else
+ echo "Successfully filtered " $dropped " packets"
+ fi
+}
+
+function test {
+ echo -n "Loading bpf program '$2'... "
+ tc qdisc add dev $IFC clsact
+ tc filter add dev $IFC ingress bpf da obj $1 sec $2
+ local status=$?
+ if [ $status -ne 0 ]; then
+ echo "FAIL"
+ else
+ echo "ok"
+ pktgen
+ fi
+ tc qdisc del dev $IFC clsact
+}
+
+IFC=test_veth
+
+ip link add name $IFC type veth peer name pair_$IFC
+ip link set $IFC up
+ip link set pair_$IFC up
+
+test ./parse_simple.o simple
+test ./parse_varlen.o varlen
+test ./parse_ldabs.o ldabs
+ip link del dev $IFC
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c
new file mode 100644
index 000000000..fbd43e2bb
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_kern.c
@@ -0,0 +1,44 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+#include <uapi/linux/utsname.h>
+#include "trace_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 1);
+} cgroup_map SEC(".maps");
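+/* Slot 0 is filled in from userspace with an fd of the target cgroup
+ * (see test_current_task_under_cgroup_user.c).
+ */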
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, 1);
+} perf_map SEC(".maps");
+
+/* Writes the last PID that called sync to a map at index 0 */
+SEC("kprobe/" SYSCALL(sys_sync))
+int bpf_prog1(struct pt_regs *ctx)
+{
+ u64 pid = bpf_get_current_pid_tgid();
+ int idx = 0;
+
+ if (!bpf_current_task_under_cgroup(&cgroup_map, 0))
+ return 0;
+
+ bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
new file mode 100644
index 000000000..ac251a417
--- /dev/null
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "cgroup_helpers.h"
+
+#define CGROUP_PATH "/my-cgroup"
+
+int main(int argc, char **argv)
+{
+ pid_t remote_pid, local_pid = getpid();
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ int cg2, idx = 0, rc = 1;
+ struct bpf_object *obj;
+ char filename[256];
+ int map_fd[2];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 1;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "cgroup_map");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "perf_map");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ if (setup_cgroup_environment())
+ goto err;
+
+ cg2 = create_and_get_cgroup(CGROUP_PATH);
+
+ if (cg2 < 0)
+ goto err;
+
+ if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+ log_err("Adding target cgroup to map");
+ goto err;
+ }
+
+ if (join_cgroup(CGROUP_PATH))
+ goto err;
+
+ /*
+ * The installed helper program catches the sync call and should
+ * write the calling PID to the map.
+ */
+
+ sync();
+ bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid != remote_pid) {
+ fprintf(stderr,
+ "BPF Helper didn't write correct PID to map, but: %d\n",
+ remote_pid);
+ goto err;
+ }
+
+ /* Verify the negative scenario; leave the cgroup */
+ if (join_cgroup("/"))
+ goto err;
+
+ remote_pid = 0;
+ bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
+
+ sync();
+ bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
+
+ if (local_pid == remote_pid) {
+ fprintf(stderr, "BPF cgroup negative test did not work\n");
+ goto err;
+ }
+
+ rc = 0;
+
+err:
+ close(cg2);
+ cleanup_cgroup_environment();
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return rc;
+}
diff --git a/samples/bpf/test_lru_dist.c b/samples/bpf/test_lru_dist.c
new file mode 100644
index 000000000..5efb91763
--- /dev/null
+++ b/samples/bpf/test_lru_dist.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 Facebook
+ */
+#define _GNU_SOURCE
+#include <linux/types.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#ifndef offsetof
+# define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
+#endif
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+static int nr_cpus;
+static unsigned long long *dist_keys;
+static unsigned int dist_key_counts;
+
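+/* Minimal userspace copy of the kernel's doubly-linked list helpers,
+ * used to implement the reference LRU below.
+ */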
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
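+/* "pfect" is short for perfect: an exact LRU kept in userspace, used as
+ * a baseline to compare miss rates against the kernel's BPF LRU map.
+ */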
+struct pfect_lru_node {
+ struct list_head list;
+ unsigned long long key;
+};
+
+struct pfect_lru {
+ struct list_head list;
+ struct pfect_lru_node *free_nodes;
+ unsigned int cur_size;
+ unsigned int lru_size;
+ unsigned int nr_unique;
+ unsigned int nr_misses;
+ unsigned int total;
+ int map_fd;
+};
+
+static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size,
+ unsigned int nr_possible_elems)
+{
+ lru->map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
+ sizeof(unsigned long long),
+ sizeof(struct pfect_lru_node *),
+ nr_possible_elems, NULL);
+ assert(lru->map_fd != -1);
+
+ lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node));
+ assert(lru->free_nodes);
+
+ INIT_LIST_HEAD(&lru->list);
+ lru->cur_size = 0;
+ lru->lru_size = lru_size;
+ lru->nr_unique = lru->nr_misses = lru->total = 0;
+}
+
+static void pfect_lru_destroy(struct pfect_lru *lru)
+{
+ close(lru->map_fd);
+ free(lru->free_nodes);
+}
+
+static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
+ unsigned long long key)
+{
+ struct pfect_lru_node *node = NULL;
+ int seen = 0;
+
+ lru->total++;
+ if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
+ if (node) {
+ list_move(&node->list, &lru->list);
+ return 1;
+ }
+ seen = 1;
+ }
+
+ if (lru->cur_size < lru->lru_size) {
+ node = &lru->free_nodes[lru->cur_size++];
+ INIT_LIST_HEAD(&node->list);
+ } else {
+ struct pfect_lru_node *null_node = NULL;
+
+ node = list_last_entry(&lru->list,
+ struct pfect_lru_node,
+ list);
+ bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
+ }
+
+ node->key = key;
+ list_move(&node->list, &lru->list);
+
+ lru->nr_misses++;
+ if (seen) {
+ assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
+ } else {
+ lru->nr_unique++;
+ assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
+ }
+
+ return seen;
+}
+
+static unsigned int read_keys(const char *dist_file,
+ unsigned long long **keys)
+{
+ struct stat fst;
+ unsigned long long *retkeys;
+ unsigned int counts = 0;
+ int dist_fd;
+ char *b, *l;
+ int i;
+
+ dist_fd = open(dist_file, 0);
+ assert(dist_fd != -1);
+
+ assert(fstat(dist_fd, &fst) == 0);
+ b = malloc(fst.st_size);
+ assert(b);
+
+ assert(read(dist_fd, b, fst.st_size) == fst.st_size);
+ close(dist_fd);
+ for (i = 0; i < fst.st_size; i++) {
+ if (b[i] == '\n')
+ counts++;
+ }
+ counts++; /* in case the last line has no \n */
+
+ retkeys = malloc(counts * sizeof(unsigned long long));
+ assert(retkeys);
+
+ counts = 0;
+ for (l = strtok(b, "\n"); l; l = strtok(NULL, "\n"))
+ retkeys[counts++] = strtoull(l, NULL, 10);
+ free(b);
+
+ *keys = retkeys;
+
+ return counts;
+}
+
+static int create_map(int map_type, int map_flags, unsigned int size)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .map_flags = map_flags,
+ );
+ int map_fd;
+
+ map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
+ sizeof(unsigned long long), size, &opts);
+
+ if (map_fd == -1)
+ perror("bpf_create_map");
+
+ return map_fd;
+}
+
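+/* Pin @pid to the next CPU at or after @next_to_try that accepts the
+ * affinity; returns the index to try next, or -1 once CPUs run out.
+ */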
+static int sched_next_online(int pid, int next_to_try)
+{
+ cpu_set_t cpuset;
+
+ if (next_to_try == nr_cpus)
+ return -1;
+
+ while (next_to_try < nr_cpus) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(next_to_try++, &cpuset);
+ if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+ break;
+ }
+
+ return next_to_try;
+}
+
+static void run_parallel(unsigned int tasks, void (*fn)(int i, void *data),
+ void *data)
+{
+ int next_sched_cpu = 0;
+ pid_t pid[tasks];
+ int i;
+
+ for (i = 0; i < tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ next_sched_cpu = sched_next_online(0, next_sched_cpu);
+ fn(i, data);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ /* This is mostly redundant and just allows the parent
+ * process to update next_sched_cpu for the next child
+ * process.
+ */
+ next_sched_cpu = sched_next_online(pid[i], next_sched_cpu);
+ }
+ for (i = 0; i < tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+}
+
+static void do_test_lru_dist(int task, void *data)
+{
+ unsigned int nr_misses = 0;
+ struct pfect_lru pfect_lru;
+ unsigned long long key, value = 1234;
+ unsigned int i;
+
+ unsigned int lru_map_fd = ((unsigned int *)data)[0];
+ unsigned int lru_size = ((unsigned int *)data)[1];
+ unsigned long long key_offset = task * dist_key_counts;
+
+ pfect_lru_init(&pfect_lru, lru_size, dist_key_counts);
+
+ for (i = 0; i < dist_key_counts; i++) {
+ key = dist_keys[i] + key_offset;
+
+ pfect_lru_lookup_or_insert(&pfect_lru, key);
+
+ if (!bpf_map_lookup_elem(lru_map_fd, &key, &value))
+ continue;
+
+ if (bpf_map_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
+ printf("bpf_map_update_elem(lru_map_fd, %llu): errno:%d\n",
+ key, errno);
+ assert(0);
+ }
+
+ nr_misses++;
+ }
+
+ printf(" task:%d BPF LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
+ task, pfect_lru.nr_unique, dist_key_counts, nr_misses,
+ dist_key_counts);
+ printf(" task:%d Perfect LRU: nr_unique:%u(/%u) nr_misses:%u(/%u)\n",
+ task, pfect_lru.nr_unique, pfect_lru.total,
+ pfect_lru.nr_misses, pfect_lru.total);
+
+ pfect_lru_destroy(&pfect_lru);
+ close(lru_map_fd);
+}
+
+static void test_parallel_lru_dist(int map_type, int map_flags,
+ int nr_tasks, unsigned int lru_size)
+{
+ int child_data[2];
+ int lru_map_fd;
+
+ printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
+ map_flags);
+
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ lru_map_fd = create_map(map_type, map_flags,
+ nr_cpus * lru_size);
+ else
+ lru_map_fd = create_map(map_type, map_flags,
+ nr_tasks * lru_size);
+ assert(lru_map_fd != -1);
+
+ child_data[0] = lru_map_fd;
+ child_data[1] = lru_size;
+
+ run_parallel(nr_tasks, do_test_lru_dist, child_data);
+
+ close(lru_map_fd);
+}
+
+static void test_lru_loss0(int map_type, int map_flags)
+{
+ unsigned long long key, value[nr_cpus];
+ unsigned int old_unused_losses = 0;
+ unsigned int new_unused_losses = 0;
+ unsigned int used_losses = 0;
+ int map_fd;
+
+ printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
+ map_flags);
+
+ assert(sched_next_online(0, 0) != -1);
+
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ map_fd = create_map(map_type, map_flags, 900 * nr_cpus);
+ else
+ map_fd = create_map(map_type, map_flags, 900);
+
+ assert(map_fd != -1);
+
+ value[0] = 1234;
+
+ for (key = 1; key <= 1000; key++) {
+ int start_key, end_key;
+
+ assert(bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
+
+ start_key = 101;
+ end_key = min(key, 900);
+
+ while (start_key <= end_key) {
+ bpf_map_lookup_elem(map_fd, &start_key, value);
+ start_key++;
+ }
+ }
+
+ for (key = 1; key <= 1000; key++) {
+ if (bpf_map_lookup_elem(map_fd, &key, value)) {
+ if (key <= 100)
+ old_unused_losses++;
+ else if (key <= 900)
+ used_losses++;
+ else
+ new_unused_losses++;
+ }
+ }
+
+ close(map_fd);
+
+ printf("older-elem-losses:%d(/100) active-elem-losses:%d(/800) "
+ "newer-elem-losses:%d(/100)\n",
+ old_unused_losses, used_losses, new_unused_losses);
+}
+
+static void test_lru_loss1(int map_type, int map_flags)
+{
+ unsigned long long key, value[nr_cpus];
+ int map_fd;
+ unsigned int nr_losses = 0;
+
+ printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
+ map_flags);
+
+ assert(sched_next_online(0, 0) != -1);
+
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ map_fd = create_map(map_type, map_flags, 1000 * nr_cpus);
+ else
+ map_fd = create_map(map_type, map_flags, 1000);
+
+ assert(map_fd != -1);
+
+ value[0] = 1234;
+
+ for (key = 1; key <= 1000; key++)
+ assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
+
+ for (key = 1; key <= 1000; key++) {
+ if (bpf_map_lookup_elem(map_fd, &key, value))
+ nr_losses++;
+ }
+
+ close(map_fd);
+
+ printf("nr_losses:%d(/1000)\n", nr_losses);
+}
+
+static void do_test_parallel_lru_loss(int task, void *data)
+{
+ const unsigned int nr_stable_elems = 1000;
+ const unsigned int nr_repeats = 100000;
+
+ int map_fd = *(int *)data;
+ unsigned long long stable_base;
+ unsigned long long key, value[nr_cpus];
+ unsigned long long next_ins_key;
+ unsigned int nr_losses = 0;
+ unsigned int i;
+
+ stable_base = task * nr_repeats * 2 + 1;
+ next_ins_key = stable_base;
+ value[0] = 1234;
+ for (i = 0; i < nr_stable_elems; i++) {
+ assert(bpf_map_update_elem(map_fd, &next_ins_key, value,
+ BPF_NOEXIST) == 0);
+ next_ins_key++;
+ }
+
+ for (i = 0; i < nr_repeats; i++) {
+ int rn;
+
+ rn = rand();
+
+ if (rn % 10) {
+ key = rn % nr_stable_elems + stable_base;
+ bpf_map_lookup_elem(map_fd, &key, value);
+ } else {
+ bpf_map_update_elem(map_fd, &next_ins_key, value,
+ BPF_NOEXIST);
+ next_ins_key++;
+ }
+ }
+
+ key = stable_base;
+ for (i = 0; i < nr_stable_elems; i++) {
+ if (bpf_map_lookup_elem(map_fd, &key, value))
+ nr_losses++;
+ key++;
+ }
+
+ printf(" task:%d nr_losses:%u\n", task, nr_losses);
+}
+
+static void test_parallel_lru_loss(int map_type, int map_flags, int nr_tasks)
+{
+ int map_fd;
+
+ printf("%s (map_type:%d map_flags:0x%X):\n", __func__, map_type,
+ map_flags);
+
+ /* Give 20% more than the active working set */
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ map_fd = create_map(map_type, map_flags,
+ nr_cpus * (1000 + 200));
+ else
+ map_fd = create_map(map_type, map_flags,
+ nr_tasks * (1000 + 200));
+
+ assert(map_fd != -1);
+
+ run_parallel(nr_tasks, do_test_parallel_lru_loss, &map_fd);
+
+ close(map_fd);
+}
+
+int main(int argc, char **argv)
+{
+ int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
+ const char *dist_file;
+ int nr_tasks = 1;
+ int lru_size;
+ int f;
+
+ if (argc < 4) {
+ printf("Usage: %s <dist-file> <lru-size> <nr-tasks>\n",
+ argv[0]);
+ return -1;
+ }
+
+ dist_file = argv[1];
+ lru_size = atoi(argv[2]);
+ nr_tasks = atoi(argv[3]);
+
+ setbuf(stdout, NULL);
+
+ srand(time(NULL));
+
+ nr_cpus = bpf_num_possible_cpus();
+ assert(nr_cpus != -1);
+ printf("nr_cpus:%d\n\n", nr_cpus);
+
+ nr_tasks = min(nr_tasks, nr_cpus);
+
+ dist_key_counts = read_keys(dist_file, &dist_keys);
+ if (!dist_key_counts) {
+ printf("%s has no key\n", dist_file);
+ return -1;
+ }
+
+ for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
+ test_lru_loss0(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
+ test_lru_loss1(BPF_MAP_TYPE_LRU_HASH, map_flags[f]);
+ test_parallel_lru_loss(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
+ nr_tasks);
+ test_parallel_lru_dist(BPF_MAP_TYPE_LRU_HASH, map_flags[f],
+ nr_tasks, lru_size);
+ printf("\n");
+ }
+
+ free(dist_keys);
+
+ return 0;
+}
diff --git a/samples/bpf/test_lwt_bpf.c b/samples/bpf/test_lwt_bpf.c
new file mode 100644
index 000000000..1b568575a
--- /dev/null
+++ b/samples/bpf/test_lwt_bpf.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmpv6.h>
+#include <linux/if_ether.h>
+#include <bpf/bpf_helpers.h>
+#include <string.h>
+
+# define printk(fmt, ...) \
+ ({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+ })
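+
+/* printk() output lands in /sys/kernel/debug/tracing/trace, which is
+ * what test_lwt_bpf.sh greps to verify each test case.
+ */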
+
+#define CB_MAGIC 1234
+
+/* Test: Pass all packets through */
+SEC("nop")
+int do_nop(struct __sk_buff *skb)
+{
+ return BPF_OK;
+}
+
+/* Test: Verify context information can be accessed */
+SEC("test_ctx")
+int do_test_ctx(struct __sk_buff *skb)
+{
+ skb->cb[0] = CB_MAGIC;
+ printk("len %d hash %d protocol %d\n", skb->len, skb->hash,
+ skb->protocol);
+ printk("cb %d ingress_ifindex %d ifindex %d\n", skb->cb[0],
+ skb->ingress_ifindex, skb->ifindex);
+
+ return BPF_OK;
+}
+
+/* Test: Ensure skb->cb[] buffer is cleared */
+SEC("test_cb")
+int do_test_cb(struct __sk_buff *skb)
+{
+ printk("cb0: %x cb1: %x cb2: %x\n", skb->cb[0], skb->cb[1],
+ skb->cb[2]);
+ printk("cb3: %x cb4: %x\n", skb->cb[3], skb->cb[4]);
+
+ return BPF_OK;
+}
+
+/* Test: Verify skb data can be read */
+SEC("test_data")
+int do_test_data(struct __sk_buff *skb)
+{
+ void *data = (void *)(long)skb->data;
+ void *data_end = (void *)(long)skb->data_end;
+ struct iphdr *iph = data;
+
+ if (data + sizeof(*iph) > data_end) {
+ printk("packet truncated\n");
+ return BPF_DROP;
+ }
+
+ printk("src: %x dst: %x\n", iph->saddr, iph->daddr);
+
+ return BPF_OK;
+}
+
+#define IP_CSUM_OFF offsetof(struct iphdr, check)
+#define IP_DST_OFF offsetof(struct iphdr, daddr)
+#define IP_SRC_OFF offsetof(struct iphdr, saddr)
+#define IP_PROTO_OFF offsetof(struct iphdr, protocol)
+#define TCP_CSUM_OFF offsetof(struct tcphdr, check)
+#define UDP_CSUM_OFF offsetof(struct udphdr, check)
+#define IS_PSEUDO 0x10
+
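+/* Rewrite one IPv4 address in the packet and incrementally fix up both
+ * the L4 checksum (TCP/UDP/ICMPv6; IS_PSEUDO marks pseudo-header
+ * checksums) and the IPv4 header checksum.
+ */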
+static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip,
+ uint32_t new_ip, int rw_daddr)
+{
+ int ret, off = 0, flags = IS_PSEUDO;
+ uint8_t proto;
+
+ ret = bpf_skb_load_bytes(skb, IP_PROTO_OFF, &proto, 1);
+ if (ret < 0) {
+ printk("bpf_l4_csum_replace failed: %d\n", ret);
+ return BPF_DROP;
+ }
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ off = TCP_CSUM_OFF;
+ break;
+
+ case IPPROTO_UDP:
+ off = UDP_CSUM_OFF;
+ flags |= BPF_F_MARK_MANGLED_0;
+ break;
+
+ case IPPROTO_ICMPV6:
+ off = offsetof(struct icmp6hdr, icmp6_cksum);
+ break;
+ }
+
+ if (off) {
+ ret = bpf_l4_csum_replace(skb, off, old_ip, new_ip,
+ flags | sizeof(new_ip));
+ if (ret < 0) {
+ printk("bpf_l4_csum_replace failed: %d\n");
+ return BPF_DROP;
+ }
+ }
+
+ ret = bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
+ if (ret < 0) {
+ printk("bpf_l3_csum_replace failed: %d\n", ret);
+ return BPF_DROP;
+ }
+
+ if (rw_daddr)
+ ret = bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
+ else
+ ret = bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
+
+ if (ret < 0) {
+ printk("bpf_skb_store_bytes() failed: %d\n", ret);
+ return BPF_DROP;
+ }
+
+ return BPF_OK;
+}
+
+/* Test: Verify skb data can be modified */
+SEC("test_rewrite")
+int do_test_rewrite(struct __sk_buff *skb)
+{
+ uint32_t old_ip, new_ip = 0x3fea8c0;
+ int ret;
+
+ ret = bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
+ if (ret < 0) {
+ printk("bpf_skb_load_bytes failed: %d\n", ret);
+ return BPF_DROP;
+ }
+
+ if (old_ip == 0x2fea8c0) {
+ printk("out: rewriting from %x to %x\n", old_ip, new_ip);
+ return rewrite(skb, old_ip, new_ip, 1);
+ }
+
+ return BPF_OK;
+}
+
+static inline int __do_push_ll_and_redirect(struct __sk_buff *skb)
+{
+ uint64_t smac = SRC_MAC, dmac = DST_MAC;
+ int ret, ifindex = DST_IFINDEX;
+ struct ethhdr ehdr;
+
+ ret = bpf_skb_change_head(skb, 14, 0);
+ if (ret < 0) {
+ printk("skb_change_head() failed: %d\n", ret);
+ }
+
+ ehdr.h_proto = __constant_htons(ETH_P_IP);
+ memcpy(&ehdr.h_source, &smac, 6);
+ memcpy(&ehdr.h_dest, &dmac, 6);
+
+ ret = bpf_skb_store_bytes(skb, 0, &ehdr, sizeof(ehdr), 0);
+ if (ret < 0) {
+ printk("skb_store_bytes() failed: %d\n", ret);
+ return BPF_DROP;
+ }
+
+ return bpf_redirect(ifindex, 0);
+}
+
+SEC("push_ll_and_redirect_silent")
+int do_push_ll_and_redirect_silent(struct __sk_buff *skb)
+{
+ return __do_push_ll_and_redirect(skb);
+}
+
+SEC("push_ll_and_redirect")
+int do_push_ll_and_redirect(struct __sk_buff *skb)
+{
+ int ret, ifindex = DST_IFINDEX;
+
+ ret = __do_push_ll_and_redirect(skb);
+ if (ret >= 0)
+ printk("redirected to %d\n", ifindex);
+
+ return ret;
+}
+
+static inline void __fill_garbage(struct __sk_buff *skb)
+{
+ uint64_t f = 0xFFFFFFFFFFFFFFFF;
+
+ bpf_skb_store_bytes(skb, 0, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 8, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 16, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 24, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 32, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 40, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 48, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 56, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 64, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 72, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 80, &f, sizeof(f), 0);
+ bpf_skb_store_bytes(skb, 88, &f, sizeof(f), 0);
+}
+
+SEC("fill_garbage")
+int do_fill_garbage(struct __sk_buff *skb)
+{
+ __fill_garbage(skb);
+ printk("Set initial 96 bytes of header to FF\n");
+ return BPF_OK;
+}
+
+SEC("fill_garbage_and_redirect")
+int do_fill_garbage_and_redirect(struct __sk_buff *skb)
+{
+ int ifindex = DST_IFINDEX;
+ __fill_garbage(skb);
+ printk("redirected to %d\n", ifindex);
+ return bpf_redirect(ifindex, 0);
+}
+
+/* Drop all packets */
+SEC("drop_all")
+int do_drop_all(struct __sk_buff *skb)
+{
+ printk("dropping with: %d\n", BPF_DROP);
+ return BPF_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
new file mode 100755
index 000000000..65a976058
--- /dev/null
+++ b/samples/bpf/test_lwt_bpf.sh
@@ -0,0 +1,400 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Uncomment to see generated bytecode
+#VERBOSE=verbose
+
+NS1=lwt_ns1
+NS2=lwt_ns2
+VETH0=tst_lwt1a
+VETH1=tst_lwt1b
+VETH2=tst_lwt2a
+VETH3=tst_lwt2b
+IPVETH0="192.168.254.1"
+IPVETH1="192.168.254.2"
+IPVETH1b="192.168.254.3"
+
+IPVETH2="192.168.111.1"
+IPVETH3="192.168.111.2"
+
+IP_LOCAL="192.168.99.1"
+
+TRACE_ROOT=/sys/kernel/debug/tracing
+
+function lookup_mac()
+{
+ set +x
+ if [ ! -z "$2" ]; then
+ MAC=$(ip netns exec $2 ip link show $1 | grep ether | awk '{print $2}')
+ else
+ MAC=$(ip link show $1 | grep ether | awk '{print $2}')
+ fi
+ MAC="${MAC//:/}"
+ echo "0x${MAC:10:2}${MAC:8:2}${MAC:6:2}${MAC:4:2}${MAC:2:2}${MAC:0:2}"
+ set -x
+}
+
+function cleanup {
+ set +ex
+ rm test_lwt_bpf.o 2> /dev/null
+ ip link del $VETH0 2> /dev/null
+ ip link del $VETH1 2> /dev/null
+ ip link del $VETH2 2> /dev/null
+ ip link del $VETH3 2> /dev/null
+ ip netns exec $NS1 killall netserver
+ ip netns delete $NS1 2> /dev/null
+ ip netns delete $NS2 2> /dev/null
+ set -ex
+}
+
+function setup_one_veth {
+ ip netns add $1
+ ip link add $2 type veth peer name $3
+ ip link set dev $2 up
+ ip addr add $4/24 dev $2
+ ip link set $3 netns $1
+ ip netns exec $1 ip link set dev $3 up
+ ip netns exec $1 ip addr add $5/24 dev $3
+
+ if [ "$6" ]; then
+ ip netns exec $1 ip addr add $6/32 dev $3
+ fi
+}
+
+function get_trace {
+ set +x
+ cat ${TRACE_ROOT}/trace | grep -v '^#'
+ set -x
+}
+
+function cleanup_routes {
+ ip route del ${IPVETH1}/32 dev $VETH0 2> /dev/null || true
+ ip route del table local local ${IP_LOCAL}/32 dev lo 2> /dev/null || true
+}
+
+function install_test {
+ cleanup_routes
+ cp /dev/null ${TRACE_ROOT}/trace
+
+ OPTS="encap bpf headroom 14 $1 obj test_lwt_bpf.o section $2 $VERBOSE"
+
+ if [ "$1" == "in" ]; then
+ ip route add table local local ${IP_LOCAL}/32 $OPTS dev lo
+ else
+ ip route add ${IPVETH1}/32 $OPTS dev $VETH0
+ fi
+}
+
+function remove_prog {
+ if [ "$1" == "in" ]; then
+ ip route del table local local ${IP_LOCAL}/32 dev lo
+ else
+ ip route del ${IPVETH1}/32 dev $VETH0
+ fi
+}
+
+function filter_trace {
+ # Add newline to allow starting EXPECT= variables on newline
+ NL=$'\n'
+ echo "${NL}$*" | sed -e 's/^.*: : //g'
+}
+
+function expect_fail {
+ set +x
+ echo "FAIL:"
+ echo "Expected: $1"
+ echo "Got: $2"
+ set -x
+ exit 1
+}
+
+function match_trace {
+ set +x
+ RET=0
+ TRACE=$1
+ EXPECT=$2
+ GOT="$(filter_trace "$TRACE")"
+
+ [ "$GOT" != "$EXPECT" ] && {
+ expect_fail "$EXPECT" "$GOT"
+ RET=1
+ }
+ set -x
+ return $RET
+}
+
+function test_start {
+ set +x
+ echo "----------------------------------------------------------------"
+ echo "Starting test: $*"
+ echo "----------------------------------------------------------------"
+ set -x
+}
+
+function failure {
+ get_trace
+ echo "FAIL: $*"
+ exit 1
+}
+
+function test_ctx_xmit {
+ test_start "test_ctx on lwt xmit"
+ install_test xmit test_ctx
+ ping -c 3 $IPVETH1 || {
+ failure "test_ctx xmit: packets are dropped"
+ }
+ match_trace "$(get_trace)" "
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 0 ifindex $DST_IFINDEX
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 0 ifindex $DST_IFINDEX
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 0 ifindex $DST_IFINDEX" || exit 1
+ remove_prog xmit
+}
+
+function test_ctx_out {
+ test_start "test_ctx on lwt out"
+ install_test out test_ctx
+ ping -c 3 $IPVETH1 || {
+ failure "test_ctx out: packets are dropped"
+ }
+ match_trace "$(get_trace)" "
+len 84 hash 0 protocol 0
+cb 1234 ingress_ifindex 0 ifindex 0
+len 84 hash 0 protocol 0
+cb 1234 ingress_ifindex 0 ifindex 0
+len 84 hash 0 protocol 0
+cb 1234 ingress_ifindex 0 ifindex 0" || exit 1
+ remove_prog out
+}
+
+function test_ctx_in {
+ test_start "test_ctx on lwt in"
+ install_test in test_ctx
+ ping -c 3 $IP_LOCAL || {
+ failure "test_ctx out: packets are dropped"
+ }
+ # We will see both request & reply packets as the packets will
+ # be from $IP_LOCAL => $IP_LOCAL
+ match_trace "$(get_trace)" "
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1
+len 84 hash 0 protocol 8
+cb 1234 ingress_ifindex 1 ifindex 1" || exit 1
+ remove_prog in
+}
+
+function test_data {
+ test_start "test_data on lwt $1"
+ install_test $1 test_data
+ ping -c 3 $IPVETH1 || {
+ failure "test_data ${1}: packets are dropped"
+ }
+ match_trace "$(get_trace)" "
+src: 1fea8c0 dst: 2fea8c0
+src: 1fea8c0 dst: 2fea8c0
+src: 1fea8c0 dst: 2fea8c0" || exit 1
+ remove_prog $1
+}
+
+function test_data_in {
+ test_start "test_data on lwt in"
+ install_test in test_data
+ ping -c 3 $IP_LOCAL || {
+ failure "test_data in: packets are dropped"
+ }
+ # We will see both request & reply packets as the packets will
+ # be from $IP_LOCAL => $IP_LOCAL
+ match_trace "$(get_trace)" "
+src: 163a8c0 dst: 163a8c0
+src: 163a8c0 dst: 163a8c0
+src: 163a8c0 dst: 163a8c0
+src: 163a8c0 dst: 163a8c0
+src: 163a8c0 dst: 163a8c0
+src: 163a8c0 dst: 163a8c0" || exit 1
+ remove_prog in
+}
+
+function test_cb {
+ test_start "test_cb on lwt $1"
+ install_test $1 test_cb
+ ping -c 3 $IPVETH1 || {
+ failure "test_cb ${1}: packets are dropped"
+ }
+ match_trace "$(get_trace)" "
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0" || exit 1
+ remove_prog $1
+}
+
+function test_cb_in {
+ test_start "test_cb on lwt in"
+ install_test in test_cb
+ ping -c 3 $IP_LOCAL || {
+ failure "test_cb in: packets are dropped"
+ }
+ # We will see both request & reply packets as the packets will
+ # be from $IP_LOCAL => $IP_LOCAL
+ match_trace "$(get_trace)" "
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0
+cb0: 0 cb1: 0 cb2: 0
+cb3: 0 cb4: 0" || exit 1
+ remove_prog in
+}
+
+function test_drop_all {
+ test_start "test_drop_all on lwt $1"
+ install_test $1 drop_all
+ ping -c 3 $IPVETH1 && {
+ failure "test_drop_all ${1}: Unexpected success of ping"
+ }
+ match_trace "$(get_trace)" "
+dropping with: 2
+dropping with: 2
+dropping with: 2" || exit 1
+ remove_prog $1
+}
+
+function test_drop_all_in {
+ test_start "test_drop_all on lwt in"
+ install_test in drop_all
+ ping -c 3 $IP_LOCAL && {
+ failure "test_drop_all in: Unexpected success of ping"
+ }
+ match_trace "$(get_trace)" "
+dropping with: 2
+dropping with: 2
+dropping with: 2" || exit 1
+ remove_prog in
+}
+
+function test_push_ll_and_redirect {
+ test_start "test_push_ll_and_redirect on lwt xmit"
+ install_test xmit push_ll_and_redirect
+ ping -c 3 $IPVETH1 || {
+ failure "Redirected packets appear to be dropped"
+ }
+ match_trace "$(get_trace)" "
+redirected to $DST_IFINDEX
+redirected to $DST_IFINDEX
+redirected to $DST_IFINDEX" || exit 1
+ remove_prog xmit
+}
+
+function test_no_l2_and_redirect {
+ test_start "test_no_l2_and_redirect on lwt xmit"
+ install_test xmit fill_garbage_and_redirect
+ ping -c 3 $IPVETH1 && {
+ failure "Unexpected success despite lack of L2 header"
+ }
+ match_trace "$(get_trace)" "
+redirected to $DST_IFINDEX
+redirected to $DST_IFINDEX
+redirected to $DST_IFINDEX" || exit 1
+ remove_prog xmit
+}
+
+function test_rewrite {
+ test_start "test_rewrite on lwt xmit"
+ install_test xmit test_rewrite
+ ping -c 3 $IPVETH1 || {
+ failure "Rewritten packets appear to be dropped"
+ }
+ match_trace "$(get_trace)" "
+out: rewriting from 2fea8c0 to 3fea8c0
+out: rewriting from 2fea8c0 to 3fea8c0
+out: rewriting from 2fea8c0 to 3fea8c0" || exit 1
+ remove_prog xmit
+}
+
+function test_fill_garbage {
+ test_start "test_fill_garbage on lwt xmit"
+ install_test xmit fill_garbage
+ ping -c 3 $IPVETH1 && {
+ failure "test_drop_all ${1}: Unexpected success of ping"
+ }
+ match_trace "$(get_trace)" "
+Set initial 96 bytes of header to FF
+Set initial 96 bytes of header to FF
+Set initial 96 bytes of header to FF" || exit 1
+ remove_prog xmit
+}
+
+function test_netperf_nop {
+ test_start "test_netperf_nop on lwt xmit"
+ install_test xmit nop
+ netperf -H $IPVETH1 -t TCP_STREAM || {
+ failure "packets appear to be dropped"
+ }
+ match_trace "$(get_trace)" ""|| exit 1
+ remove_prog xmit
+}
+
+function test_netperf_redirect {
+ test_start "test_netperf_redirect on lwt xmit"
+ install_test xmit push_ll_and_redirect_silent
+ netperf -H $IPVETH1 -t TCP_STREAM || {
+ failure "Rewritten packets appear to be dropped"
+ }
+ match_trace "$(get_trace)" ""|| exit 1
+ remove_prog xmit
+}
+
+cleanup
+setup_one_veth $NS1 $VETH0 $VETH1 $IPVETH0 $IPVETH1 $IPVETH1b
+setup_one_veth $NS2 $VETH2 $VETH3 $IPVETH2 $IPVETH3
+ip netns exec $NS1 netserver
+echo 1 > ${TRACE_ROOT}/tracing_on
+
+DST_MAC=$(lookup_mac $VETH1 $NS1)
+SRC_MAC=$(lookup_mac $VETH0)
+DST_IFINDEX=$(cat /sys/class/net/$VETH0/ifindex)
+
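+# Bake the learned addresses into the BPF object at compile time;
+# test_lwt_bpf.c consumes SRC_MAC, DST_MAC and DST_IFINDEX as macros.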
+CLANG_OPTS="-O2 -target bpf -I ../include/"
+CLANG_OPTS+=" -DSRC_MAC=$SRC_MAC -DDST_MAC=$DST_MAC -DDST_IFINDEX=$DST_IFINDEX"
+clang $CLANG_OPTS -c test_lwt_bpf.c -o test_lwt_bpf.o
+
+test_ctx_xmit
+test_ctx_out
+test_ctx_in
+test_data "xmit"
+test_data "out"
+test_data_in
+test_cb "xmit"
+test_cb "out"
+test_cb_in
+test_drop_all "xmit"
+test_drop_all "out"
+test_drop_all_in
+test_rewrite
+test_push_ll_and_redirect
+test_no_l2_and_redirect
+test_fill_garbage
+test_netperf_nop
+test_netperf_redirect
+
+cleanup
+echo 0 > ${TRACE_ROOT}/tracing_on
+exit 0
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
new file mode 100644
index 000000000..b0200c8ea
--- /dev/null
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/in6.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
+
+#define MAX_NR_PORTS 65536
+
+/* map #0 */
+struct inner_a {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, int);
+ __uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
+
+/* map #1 */
+struct inner_h {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, int);
+ __uint(max_entries, 1);
+} port_h SEC(".maps");
+
+/* map #2 */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, int);
+ __uint(max_entries, 1);
+} reg_result_h SEC(".maps");
+
+/* map #3 */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, int);
+ __uint(max_entries, 1);
+} inline_result_h SEC(".maps");
+
+/* map #4 */ /* Test case #0 */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, MAX_NR_PORTS);
+ __uint(key_size, sizeof(u32));
+ __array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
+
+/* map #5 */ /* Test case #1 */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(u32));
+ __array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
+
+/* map #6 */ /* Test case #2 */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(u32));
+ __array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
+
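+/* Generic lookup through whichever inner map the outer map-in-map
+ * lookup handed back.
+ */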
+static __always_inline int do_reg_lookup(void *inner_map, u32 port)
+{
+ int *result;
+
+ result = bpf_map_lookup_elem(inner_map, &port);
+ return result ? *result : -ENOENT;
+}
+
+static __always_inline int do_inline_array_lookup(void *inner_map, u32 port)
+{
+ int *result;
+
+ if (inner_map != &port_a)
+ return -EINVAL;
+
+ result = bpf_map_lookup_elem(&port_a, &port);
+ return result ? *result : -ENOENT;
+}
+
+static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
+{
+ int *result;
+
+ if (inner_map != &port_h)
+ return -EINVAL;
+
+ result = bpf_map_lookup_elem(&port_h, &port);
+ return result ? *result : -ENOENT;
+}
+
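+/* The user-space side encodes the test case number in the last 16-bit
+ * word of a dead:beef::... destination address; any other destination
+ * is not a test connect() and is ignored.
+ */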
+SEC("kprobe/__sys_connect")
+int trace_sys_connect(struct pt_regs *ctx)
+{
+ struct sockaddr_in6 *in6;
+ u16 test_case, port, dst6[8];
+ int addrlen, ret, inline_ret, ret_key = 0;
+ u32 port_key;
+ void *outer_map, *inner_map;
+ bool inline_hash = false;
+
+ in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
+ addrlen = (int)PT_REGS_PARM3_CORE(ctx);
+
+ if (addrlen != sizeof(*in6))
+ return 0;
+
+ ret = bpf_probe_read_user(dst6, sizeof(dst6), &in6->sin6_addr);
+ if (ret) {
+ inline_ret = ret;
+ goto done;
+ }
+
+ if (dst6[0] != 0xdead || dst6[1] != 0xbeef)
+ return 0;
+
+ test_case = dst6[7];
+
+ ret = bpf_probe_read_user(&port, sizeof(port), &in6->sin6_port);
+ if (ret) {
+ inline_ret = ret;
+ goto done;
+ }
+
+ port_key = port;
+
+ ret = -ENOENT;
+ if (test_case == 0) {
+ outer_map = &a_of_port_a;
+ } else if (test_case == 1) {
+ outer_map = &h_of_port_a;
+ } else if (test_case == 2) {
+ outer_map = &h_of_port_h;
+ } else {
+ ret = __LINE__;
+ inline_ret = ret;
+ goto done;
+ }
+
+ inner_map = bpf_map_lookup_elem(outer_map, &port_key);
+ if (!inner_map) {
+ ret = __LINE__;
+ inline_ret = ret;
+ goto done;
+ }
+
+ ret = do_reg_lookup(inner_map, port_key);
+
+ if (test_case == 0 || test_case == 1)
+ inline_ret = do_inline_array_lookup(inner_map, port_key);
+ else
+ inline_ret = do_inline_hash_lookup(inner_map, port_key);
+
+done:
+ bpf_map_update_elem(&reg_result_h, &ret_key, &ret, BPF_ANY);
+ bpf_map_update_elem(&inline_result_h, &ret_key, &inline_ret, BPF_ANY);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c
new file mode 100644
index 000000000..652ec7205
--- /dev/null
+++ b/samples/bpf/test_map_in_map_user.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 Facebook
+ */
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_util.h"
+
+static int map_fd[7];
+
+#define PORT_A (map_fd[0])
+#define PORT_H (map_fd[1])
+#define REG_RESULT_H (map_fd[2])
+#define INLINE_RESULT_H (map_fd[3])
+#define A_OF_PORT_A (map_fd[4]) /* Test case #0 */
+#define H_OF_PORT_A (map_fd[5]) /* Test case #1 */
+#define H_OF_PORT_H (map_fd[6]) /* Test case #2 */
+
+static const char * const test_names[] = {
+ "Array of Array",
+ "Hash of Array",
+ "Hash of Hash",
+};
+
+#define NR_TESTS ARRAY_SIZE(test_names)
+
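+/* Updating an outer map takes an inner map *fd*, but looking the entry
+ * up returns the inner map *id*, so verify against the id reported by
+ * bpf_obj_get_info_by_fd().
+ */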
+static void check_map_id(int inner_map_fd, int map_in_map_fd, uint32_t key)
+{
+ struct bpf_map_info info = {};
+ uint32_t info_len = sizeof(info);
+ int ret, id;
+
+ ret = bpf_obj_get_info_by_fd(inner_map_fd, &info, &info_len);
+ assert(!ret);
+
+ ret = bpf_map_lookup_elem(map_in_map_fd, &key, &id);
+ assert(!ret);
+ assert(id == info.id);
+}
+
+static void populate_map(uint32_t port_key, int magic_result)
+{
+ int ret;
+
+ ret = bpf_map_update_elem(PORT_A, &port_key, &magic_result, BPF_ANY);
+ assert(!ret);
+
+ ret = bpf_map_update_elem(PORT_H, &port_key, &magic_result,
+ BPF_NOEXIST);
+ assert(!ret);
+
+ ret = bpf_map_update_elem(A_OF_PORT_A, &port_key, &PORT_A, BPF_ANY);
+ assert(!ret);
+ check_map_id(PORT_A, A_OF_PORT_A, port_key);
+
+ ret = bpf_map_update_elem(H_OF_PORT_A, &port_key, &PORT_A, BPF_NOEXIST);
+ assert(!ret);
+ check_map_id(PORT_A, H_OF_PORT_A, port_key);
+
+ ret = bpf_map_update_elem(H_OF_PORT_H, &port_key, &PORT_H, BPF_NOEXIST);
+ assert(!ret);
+ check_map_id(PORT_H, H_OF_PORT_H, port_key);
+}
+
+static void test_map_in_map(void)
+{
+ struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
+ uint32_t result_key = 0, port_key;
+ int result, inline_result;
+ int magic_result = 0xfaceb00c;
+ int ret;
+ int i;
+
+ port_key = rand() & 0x00FF;
+ populate_map(port_key, magic_result);
+
+ in6.sin6_addr.s6_addr16[0] = 0xdead;
+ in6.sin6_addr.s6_addr16[1] = 0xbeef;
+ in6.sin6_port = port_key;
+
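+ /* connect() on fd -1 fails with EBADF, but the kprobe on
+ * __sys_connect still fires and records the lookup results.
+ */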
+ for (i = 0; i < NR_TESTS; i++) {
+ printf("%s: ", test_names[i]);
+
+ in6.sin6_addr.s6_addr16[7] = i;
+ ret = connect(-1, (struct sockaddr *)&in6, sizeof(in6));
+ assert(ret == -1 && errno == EBADF);
+
+ ret = bpf_map_lookup_elem(REG_RESULT_H, &result_key, &result);
+ assert(!ret);
+
+ ret = bpf_map_lookup_elem(INLINE_RESULT_H, &result_key,
+ &inline_result);
+ assert(!ret);
+
+ if (result != magic_result || inline_result != magic_result) {
+ printf("Error. result:%d inline_result:%d\n",
+ result, inline_result);
+ exit(1);
+ }
+
+ bpf_map_delete_elem(REG_RESULT_H, &result_key);
+ bpf_map_delete_elem(INLINE_RESULT_H, &result_key);
+
+ printf("Pass\n");
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h");
+ map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h");
+ map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h");
+ map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a");
+ map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a");
+ map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h");
+ if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 ||
+ map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ test_map_in_map();
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe_kern.c
new file mode 100644
index 000000000..8fdd2c9c5
--- /dev/null
+++ b/samples/bpf/test_overhead_kprobe_kern.c
@@ -0,0 +1,49 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
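+/* Probe-read a single kernel field, yielding 0 if the read fails. */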
+#define _(P) \
+ ({ \
+ typeof(P) val = 0; \
+ bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
+ val; \
+ })
+
+SEC("kprobe/__set_task_comm")
+int prog(struct pt_regs *ctx)
+{
+ struct signal_struct *signal;
+ struct task_struct *tsk;
+ char oldcomm[TASK_COMM_LEN] = {};
+ char newcomm[TASK_COMM_LEN] = {};
+ u16 oom_score_adj;
+ u32 pid;
+
+ tsk = (void *)PT_REGS_PARM1(ctx);
+
+ pid = _(tsk->pid);
+ bpf_probe_read_kernel_str(oldcomm, sizeof(oldcomm), &tsk->comm);
+ bpf_probe_read_kernel_str(newcomm, sizeof(newcomm),
+ (void *)PT_REGS_PARM2(ctx));
+ signal = _(tsk->signal);
+ oom_score_adj = _(signal->oom_score_adj);
+ return 0;
+}
+
+SEC("kprobe/urandom_read")
+int prog2(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_overhead_raw_tp_kern.c b/samples/bpf/test_overhead_raw_tp_kern.c
new file mode 100644
index 000000000..8763181a3
--- /dev/null
+++ b/samples/bpf/test_overhead_raw_tp_kern.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("raw_tracepoint/task_rename")
+int prog(struct bpf_raw_tracepoint_args *ctx)
+{
+ return 0;
+}
+
+SEC("raw_tracepoint/urandom_read")
+int prog2(struct bpf_raw_tracepoint_args *ctx)
+{
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_overhead_tp_kern.c b/samples/bpf/test_overhead_tp_kern.c
new file mode 100644
index 000000000..80edadacb
--- /dev/null
+++ b/samples/bpf/test_overhead_tp_kern.c
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+/* from /sys/kernel/debug/tracing/events/task/task_rename/format */
+struct task_rename {
+ __u64 pad;
+ __u32 pid;
+ char oldcomm[TASK_COMM_LEN];
+ char newcomm[TASK_COMM_LEN];
+ __u16 oom_score_adj;
+};
+SEC("tracepoint/task/task_rename")
+int prog(struct task_rename *ctx)
+{
+ return 0;
+}
+
+/* from /sys/kernel/debug/tracing/events/random/urandom_read/format */
+struct urandom_read {
+ __u64 pad;
+ int got_bits;
+ int pool_left;
+ int input_left;
+};
+SEC("tracepoint/random/urandom_read")
+int prog2(struct urandom_read *ctx)
+{
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
new file mode 100644
index 000000000..88717f8ec
--- /dev/null
+++ b/samples/bpf/test_overhead_user.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <asm/unistd.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <time.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define MAX_CNT 1000000
+
+static struct bpf_link *links[2];
+static struct bpf_object *obj;
+static int cnt;
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static void test_task_rename(int cpu)
+{
+ __u64 start_time;
+ char buf[] = "test\n";
+ int i, fd;
+
+ fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
+ if (fd < 0) {
+ printf("couldn't open /proc\n");
+ exit(1);
+ }
+ start_time = time_get_ns();
+ for (i = 0; i < MAX_CNT; i++) {
+ if (write(fd, buf, sizeof(buf)) < 0) {
+ printf("task rename failed: %s\n", strerror(errno));
+ close(fd);
+ return;
+ }
+ }
+ printf("task_rename:%d: %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ close(fd);
+}
+
+static void test_urandom_read(int cpu)
+{
+ __u64 start_time;
+ char buf[4];
+ int i, fd;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0) {
+ printf("couldn't open /dev/urandom\n");
+ exit(1);
+ }
+ start_time = time_get_ns();
+ for (i = 0; i < MAX_CNT; i++) {
+ if (read(fd, buf, sizeof(buf)) < 0) {
+ printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+ close(fd);
+ return;
+ }
+ }
+ printf("urandom_read:%d: %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ close(fd);
+}
+
+static void loop(int cpu, int flags)
+{
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ sched_setaffinity(0, sizeof(cpuset), &cpuset);
+
+ if (flags & 1)
+ test_task_rename(cpu);
+ if (flags & 2)
+ test_urandom_read(cpu);
+}
+
+static void run_perf_test(int tasks, int flags)
+{
+ pid_t pid[tasks];
+ int i;
+
+ for (i = 0; i < tasks; i++) {
+ pid[i] = fork();
+ if (pid[i] == 0) {
+ loop(i, flags);
+ exit(0);
+ } else if (pid[i] == -1) {
+ printf("couldn't spawn #%d process\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < tasks; i++) {
+ int status;
+
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ assert(status == 0);
+ }
+}
+
+static int load_progs(char *filename)
+{
+ struct bpf_program *prog;
+ int err = 0;
+
+ obj = bpf_object__open_file(filename, NULL);
+ err = libbpf_get_error(obj);
+ if (err < 0) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return err;
+ }
+
+ /* load BPF program */
+ err = bpf_object__load(obj);
+ if (err < 0) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ return err;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[cnt] = bpf_program__attach(prog);
+ err = libbpf_get_error(links[cnt]);
+ if (err < 0) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[cnt] = NULL;
+ return err;
+ }
+ cnt++;
+ }
+
+ return err;
+}
+
+static void unload_progs(void)
+{
+ while (cnt)
+ bpf_link__destroy(links[--cnt]);
+
+ bpf_object__close(obj);
+}
+
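+/* test_flags is a bitmask: bits 0-1 select the BASE run, bits 2-3 the
+ * kprobe run, bits 4-5 the tracepoint run and bits 6-7 the raw
+ * tracepoint run; within each pair bit 0 is task_rename and bit 1 is
+ * urandom_read (see loop()).
+ */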
+int main(int argc, char **argv)
+{
+ int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+ int test_flags = ~0;
+ char filename[256];
+ int err = 0;
+
+ if (argc > 1)
+ test_flags = atoi(argv[1]) ? : test_flags;
+ if (argc > 2)
+ num_cpu = atoi(argv[2]) ? : num_cpu;
+
+ if (test_flags & 0x3) {
+ printf("BASE\n");
+ run_perf_test(num_cpu, test_flags);
+ }
+
+ if (test_flags & 0xC) {
+ snprintf(filename, sizeof(filename),
+ "%s_kprobe_kern.o", argv[0]);
+
+ printf("w/KPROBE\n");
+ err = load_progs(filename);
+ if (!err)
+ run_perf_test(num_cpu, test_flags >> 2);
+
+ unload_progs();
+ }
+
+ if (test_flags & 0x30) {
+ snprintf(filename, sizeof(filename),
+ "%s_tp_kern.o", argv[0]);
+ printf("w/TRACEPOINT\n");
+ err = load_progs(filename);
+ if (!err)
+ run_perf_test(num_cpu, test_flags >> 4);
+
+ unload_progs();
+ }
+
+ if (test_flags & 0xC0) {
+ snprintf(filename, sizeof(filename),
+ "%s_raw_tp_kern.o", argv[0]);
+ printf("w/RAW_TRACEPOINT\n");
+ err = load_progs(filename);
+ if (!err)
+ run_perf_test(num_cpu, test_flags >> 6);
+
+ unload_progs();
+ }
+
+ return err;
+}
diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
new file mode 100755
index 000000000..35db26f73
--- /dev/null
+++ b/samples/bpf/test_override_return.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
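+# Build a scratch btrfs image on a loop device and run tracex7 against
+# it; tracex7 is expected to use bpf_override_return() to make the
+# mount of $DEVICE fail, which is what this test exercises.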
+rm -rf tmpmnt
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir tmpmnt
+./tracex7 $DEVICE
+if [ $? -eq 0 ]
+then
+ echo "SUCCESS!"
+else
+ echo "FAILED!"
+fi
+losetup -d $DEVICE
diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c
new file mode 100644
index 000000000..220a96438
--- /dev/null
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -0,0 +1,56 @@
+/* Copyright (c) 2016 Sargun Dhillon <sargun@sargun.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct sockaddr_in);
+ __type(value, struct sockaddr_in);
+ __uint(max_entries, 256);
+} dnat_map SEC(".maps");
+
+/* kprobe is NOT a stable ABI
+ * kernel functions can be removed, renamed or completely change semantics.
+ * Number of arguments and their positions can change, etc.
+ * In such case this bpf+kprobe example will no longer be meaningful
+ *
+ * This example sits on a syscall, whose ABI is relatively stable;
+ * still, across platforms and over time even the syscall ABI may change.
+ */
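+/* On x86-64 the syscall wrapper receives a single struct pt_regs *
+ * argument, so the real connect() arguments are fetched from the
+ * pt_regs that the wrapper's first parameter points to.
+ */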
+SEC("kprobe/" SYSCALL(sys_connect))
+int bpf_prog1(struct pt_regs *ctx)
+{
+ struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
+ void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs);
+ int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs);
+ struct sockaddr_in new_addr, orig_addr = {};
+ struct sockaddr_in *mapped_addr;
+
+ if (sockaddr_len > sizeof(orig_addr))
+ return 0;
+
+ if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0)
+ return 0;
+
+ mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr);
+ if (mapped_addr != NULL) {
+ memcpy(&new_addr, mapped_addr, sizeof(new_addr));
+ bpf_probe_write_user(sockaddr_arg, &new_addr,
+ sizeof(new_addr));
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_probe_write_user_user.c b/samples/bpf/test_probe_write_user_user.c
new file mode 100644
index 000000000..00ccfb834
--- /dev/null
+++ b/samples/bpf/test_probe_write_user_user.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <assert.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+int main(int ac, char **argv)
+{
+ struct sockaddr_in *serv_addr_in, *mapped_addr_in, *tmp_addr_in;
+ struct sockaddr serv_addr, mapped_addr, tmp_addr;
+ int serverfd, serverconnfd, clientfd, map_fd;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ socklen_t sockaddr_len;
+ char filename[256];
+ char *ip;
+
+ serv_addr_in = (struct sockaddr_in *)&serv_addr;
+ mapped_addr_in = (struct sockaddr_in *)&mapped_addr;
+ tmp_addr_in = (struct sockaddr_in *)&tmp_addr;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (libbpf_get_error(prog)) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "dnat_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ assert((serverfd = socket(AF_INET, SOCK_STREAM, 0)) > 0);
+ assert((clientfd = socket(AF_INET, SOCK_STREAM, 0)) > 0);
+
+ /* Bind server to ephemeral port on lo */
+ memset(&serv_addr, 0, sizeof(serv_addr));
+ serv_addr_in->sin_family = AF_INET;
+ serv_addr_in->sin_port = 0;
+ serv_addr_in->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+
+ assert(bind(serverfd, &serv_addr, sizeof(serv_addr)) == 0);
+
+ sockaddr_len = sizeof(serv_addr);
+ assert(getsockname(serverfd, &serv_addr, &sockaddr_len) == 0);
+ ip = inet_ntoa(serv_addr_in->sin_addr);
+ printf("Server bound to: %s:%d\n", ip, ntohs(serv_addr_in->sin_port));
+
+ memset(&mapped_addr, 0, sizeof(mapped_addr));
+ mapped_addr_in->sin_family = AF_INET;
+ mapped_addr_in->sin_port = htons(5555);
+ mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255");
+
+ assert(!bpf_map_update_elem(map_fd, &mapped_addr, &serv_addr, BPF_ANY));
+
+ assert(listen(serverfd, 5) == 0);
+
+ ip = inet_ntoa(mapped_addr_in->sin_addr);
+ printf("Client connecting to: %s:%d\n",
+ ip, ntohs(mapped_addr_in->sin_port));
+ assert(connect(clientfd, &mapped_addr, sizeof(mapped_addr)) == 0);
+
+ sockaddr_len = sizeof(tmp_addr);
+ assert((serverconnfd = accept(serverfd, &tmp_addr, &sockaddr_len)) > 0);
+ ip = inet_ntoa(tmp_addr_in->sin_addr);
+ printf("Server received connection from: %s:%d\n",
+ ip, ntohs(tmp_addr_in->sin_port));
+
+ sockaddr_len = sizeof(tmp_addr);
+ assert(getpeername(clientfd, &tmp_addr, &sockaddr_len) == 0);
+ ip = inet_ntoa(tmp_addr_in->sin_addr);
+ printf("Client's peer address: %s:%d\n",
+ ip, ntohs(tmp_addr_in->sin_port));
+
+ /* Is the server's getsockname = the socket getpeername */
+ assert(memcmp(&serv_addr, &tmp_addr, sizeof(struct sockaddr_in)) == 0);
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/trace_common.h b/samples/bpf/trace_common.h
new file mode 100644
index 000000000..8cb5400ae
--- /dev/null
+++ b/samples/bpf/trace_common.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __TRACE_COMMON_H
+#define __TRACE_COMMON_H
+
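+/* SYSCALL() prefixes a syscall name with the architecture-specific
+ * wrapper prefix, e.g. SYSCALL(sys_write) expands to "__x64_sys_write"
+ * on x86-64.
+ */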
+#ifdef __x86_64__
+#define SYSCALL(SYS) "__x64_" __stringify(SYS)
+#elif defined(__s390x__)
+#define SYSCALL(SYS) "__s390x_" __stringify(SYS)
+#else
+#define SYSCALL(SYS) __stringify(SYS)
+#endif
+
+#endif
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
new file mode 100644
index 000000000..0bba5fcd7
--- /dev/null
+++ b/samples/bpf/trace_event_kern.c
@@ -0,0 +1,79 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/bpf_perf_event.h>
+#include <uapi/linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ u32 kernstack;
+ u32 userstack;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct key_t);
+ __type(value, u64);
+ __uint(max_entries, 10000);
+} counts SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, 10000);
+} stackmap SEC(".maps");
+
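+/* Compare stack ids by hash only; the USER variant additionally
+ * collects the userspace stack.
+ */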
+#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
+#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
+
+SEC("perf_event")
+int bpf_prog1(struct bpf_perf_event_data *ctx)
+{
+ char time_fmt1[] = "Time Enabled: %llu, Time Running: %llu";
+ char time_fmt2[] = "Get Time Failed, ErrCode: %d";
+ char addr_fmt[] = "Address recorded on event: %llx";
+ char fmt[] = "CPU-%d period %lld ip %llx";
+ u32 cpu = bpf_get_smp_processor_id();
+ struct bpf_perf_event_value value_buf;
+ struct key_t key;
+ u64 *val, one = 1;
+ int ret;
+
+ if (ctx->sample_period < 10000)
+ /* ignore warmup */
+ return 0;
+ bpf_get_current_comm(&key.comm, sizeof(key.comm));
+ key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
+ key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
+ if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
+ bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
+ PT_REGS_IP(&ctx->regs));
+ return 0;
+ }
+
+ ret = bpf_perf_prog_read_value(ctx, (void *)&value_buf, sizeof(struct bpf_perf_event_value));
+ if (!ret)
+ bpf_trace_printk(time_fmt1, sizeof(time_fmt1), value_buf.enabled, value_buf.running);
+ else
+ bpf_trace_printk(time_fmt2, sizeof(time_fmt2), ret);
+
+ if (ctx->addr != 0)
+ bpf_trace_printk(addr_fmt, sizeof(addr_fmt), ctx->addr);
+
+ val = bpf_map_lookup_elem(&counts, &key);
+ if (val)
+ (*val)++;
+ else
+ bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
new file mode 100644
index 000000000..9664749bf
--- /dev/null
+++ b/samples/bpf/trace_event_user.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/resource.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+#define SAMPLE_FREQ 50
+
+static int pid;
+/* counts, stackmap */
+static int map_fd[2];
+struct bpf_program *prog;
+static bool sys_read_seen, sys_write_seen;
+
+static void print_ksym(__u64 addr)
+{
+ struct ksym *sym;
+
+ if (!addr)
+ return;
+ sym = ksym_search(addr);
+ if (!sym) {
+ printf("ksym not found. Is kallsyms loaded?\n");
+ return;
+ }
+
+ printf("%s;", sym->name);
+ if (!strstr(sym->name, "sys_read"))
+ sys_read_seen = true;
+ else if (!strstr(sym->name, "sys_write"))
+ sys_write_seen = true;
+}
+
+static void print_addr(__u64 addr)
+{
+ if (!addr)
+ return;
+ printf("%llx;", addr);
+}
+
+#define TASK_COMM_LEN 16
+
+struct key_t {
+ char comm[TASK_COMM_LEN];
+ __u32 kernstack;
+ __u32 userstack;
+};
+
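+/* Print one folded stack line: count, comm, kernel frames, then user
+ * frames, root first and semicolon-separated.
+ */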
+static void print_stack(struct key_t *key, __u64 count)
+{
+ __u64 ip[PERF_MAX_STACK_DEPTH] = {};
+ static bool warned;
+ int i;
+
+ printf("%3lld %s;", count, key->comm);
+ if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_ksym(ip[i]);
+ }
+ printf("-;");
+ if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
+ printf("---;");
+ } else {
+ for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
+ print_addr(ip[i]);
+ }
+ if (count < 6)
+ printf("\r");
+ else
+ printf("\n");
+
+ if (key->kernstack == -EEXIST && !warned) {
+ printf("stackmap collisions seen. Consider increasing size\n");
+ warned = true;
+ } else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
+ printf("err stackid %d %d\n", key->kernstack, key->userstack);
+ }
+}
+
+static void err_exit(int err)
+{
+ kill(pid, SIGKILL);
+ exit(err);
+}
+
+static void print_stacks(void)
+{
+ struct key_t key = {}, next_key;
+ __u64 value;
+ __u32 stackid = 0, next_id;
+ int error = 1, fd = map_fd[0], stack_map = map_fd[1];
+
+ sys_read_seen = sys_write_seen = false;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(fd, &next_key, &value);
+ print_stack(&next_key, value);
+ bpf_map_delete_elem(fd, &next_key);
+ key = next_key;
+ }
+ printf("\n");
+ if (!sys_read_seen || !sys_write_seen) {
+ printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
+ err_exit(error);
+ }
+
+ /* clear stack map */
+ while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
+ bpf_map_delete_elem(stack_map, &next_id);
+ stackid = next_id;
+ }
+}
+
+static inline int generate_load(void)
+{
+ if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
+ printf("failed to generate some load with dd: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void test_perf_event_all_cpu(struct perf_event_attr *attr)
+{
+ int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
+ int i, pmu_fd, error = 1;
+
+ if (!links) {
+ printf("malloc of links failed\n");
+ goto err;
+ }
+
+ /* system wide perf event, no need to inherit */
+ attr->inherit = 0;
+
+ /* open perf_event on all cpus */
+ for (i = 0; i < nr_cpus; i++) {
+ pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
+ if (pmu_fd < 0) {
+ printf("sys_perf_event_open failed\n");
+ goto all_cpu_err;
+ }
+ links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (libbpf_get_error(links[i])) {
+ printf("bpf_program__attach_perf_event failed\n");
+ links[i] = NULL;
+ close(pmu_fd);
+ goto all_cpu_err;
+ }
+ }
+
+ if (generate_load() < 0)
+ goto all_cpu_err;
+
+ print_stacks();
+ error = 0;
+all_cpu_err:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+err:
+ free(links);
+ if (error)
+ err_exit(error);
+}
+
+static void test_perf_event_task(struct perf_event_attr *attr)
+{
+ struct bpf_link *link = NULL;
+ int pmu_fd, error = 1;
+
+ /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
+ * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
+ */
+ attr->inherit = 1;
+
+ /* open task bound event */
+ pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+ if (pmu_fd < 0) {
+ printf("sys_perf_event_open failed\n");
+ goto err;
+ }
+ link = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (libbpf_get_error(link)) {
+ printf("bpf_program__attach_perf_event failed\n");
+ link = NULL;
+ close(pmu_fd);
+ goto err;
+ }
+
+ if (generate_load() < 0)
+ goto err;
+
+ print_stacks();
+ error = 0;
+err:
+ bpf_link__destroy(link);
+ if (error)
+ err_exit(error);
+}
+
+static void test_bpf_perf_event(void)
+{
+ struct perf_event_attr attr_type_hw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_event_attr attr_type_sw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ struct perf_event_attr attr_hw_cache_l1d = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ };
+ struct perf_event_attr attr_hw_cache_branch_miss = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_BPU |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ };
+ struct perf_event_attr attr_type_raw = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_RAW,
+ /* Intel Instruction Retired */
+ .config = 0xc0,
+ };
+ struct perf_event_attr attr_type_raw_lock_load = {
+ .sample_freq = SAMPLE_FREQ,
+ .freq = 1,
+ .type = PERF_TYPE_RAW,
+ /* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
+ .config = 0x21d0,
+ /* Request to record lock address from PEBS */
+ .sample_type = PERF_SAMPLE_ADDR,
+ /* Record address value requires precise event */
+ .precise_ip = 2,
+ };
+
+ printf("Test HW_CPU_CYCLES\n");
+ test_perf_event_all_cpu(&attr_type_hw);
+ test_perf_event_task(&attr_type_hw);
+
+ printf("Test SW_CPU_CLOCK\n");
+ test_perf_event_all_cpu(&attr_type_sw);
+ test_perf_event_task(&attr_type_sw);
+
+ printf("Test HW_CACHE_L1D\n");
+ test_perf_event_all_cpu(&attr_hw_cache_l1d);
+ test_perf_event_task(&attr_hw_cache_l1d);
+
+ printf("Test HW_CACHE_BPU\n");
+ test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
+ test_perf_event_task(&attr_hw_cache_branch_miss);
+
+ printf("Test Instruction Retired\n");
+ test_perf_event_all_cpu(&attr_type_raw);
+ test_perf_event_task(&attr_type_raw);
+
+ printf("Test Lock Load\n");
+ test_perf_event_all_cpu(&attr_type_raw_lock_load);
+ test_perf_event_task(&attr_type_raw_lock_load);
+
+ printf("*** PASS ***\n");
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_object *obj = NULL;
+ char filename[256];
+ int error = 1;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ signal(SIGINT, err_exit);
+ signal(SIGTERM, err_exit);
+
+ if (load_kallsyms()) {
+ printf("failed to process /proc/kallsyms\n");
+ goto cleanup;
+ }
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ printf("opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ printf("finding a counts/stackmap map in obj file failed\n");
+ goto cleanup;
+ }
+
+ pid = fork();
+ if (pid == 0) {
+ read_trace_pipe();
+ return 0;
+ } else if (pid == -1) {
+ printf("couldn't spawn process\n");
+ goto cleanup;
+ }
+
+ test_bpf_perf_event();
+ error = 0;
+
+cleanup:
+ bpf_object__close(obj);
+ err_exit(error);
+}
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
new file mode 100644
index 000000000..b64815af0
--- /dev/null
+++ b/samples/bpf/trace_output_kern.c
@@ -0,0 +1,31 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "trace_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 2);
+} my_map SEC(".maps");
+
+SEC("kprobe/" SYSCALL(sys_write))
+int bpf_prog1(struct pt_regs *ctx)
+{
+ struct S {
+ u64 pid;
+ u64 cookie;
+ } data;
+
+ data.pid = bpf_get_current_pid_tgid();
+ data.cookie = 0x12345678;
+
+ bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
new file mode 100644
index 000000000..371732f9c
--- /dev/null
+++ b/samples/bpf/trace_output_user.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stdio.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <time.h>
+#include <signal.h>
+#include <bpf/libbpf.h>
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static __u64 start_time;
+static __u64 cnt;
+
+#define MAX_CNT 100000ll
+
+static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
+{
+ struct {
+ __u64 pid;
+ __u64 cookie;
+ } *e = data;
+
+ if (e->cookie != 0x12345678) {
+ printf("BUG pid %llx cookie %llx sized %d\n",
+ e->pid, e->cookie, size);
+ return;
+ }
+
+ cnt++;
+
+ if (cnt == MAX_CNT) {
+ printf("recv %lld events per sec\n",
+ MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ return;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct perf_buffer *pb;
+ struct bpf_object *obj;
+ int map_fd, ret = 0;
+ char filename[256];
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (libbpf_get_error(prog)) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
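+ /* 8 pages of perf ring buffer per CPU; print_bpf_output runs for
+ * every event the kernel side emits.
+ */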
+ pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL);
+ ret = libbpf_get_error(pb);
+ if (ret) {
+ printf("failed to setup perf_buffer: %d\n", ret);
+ return 1;
+ }
+
+ f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+ (void) f;
+
+ start_time = time_get_ns();
+ while ((ret = perf_buffer__poll(pb, 1000)) >= 0 && cnt < MAX_CNT) {
+ }
+ kill(0, SIGINT);
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return ret;
+}
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
new file mode 100644
index 000000000..ef30d2b35
--- /dev/null
+++ b/samples/bpf/tracex1_kern.c
@@ -0,0 +1,54 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define _(P) \
+ ({ \
+ typeof(P) val = 0; \
+ bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
+ val; \
+ })
+
+/* kprobe is NOT a stable ABI
+ * kernel functions can be removed, renamed or completely change semantics.
+ * Number of arguments and their positions can change, etc.
+ * In such case this bpf+kprobe example will no longer be meaningful
+ */
+SEC("kprobe/__netif_receive_skb_core")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ /* attaches to kprobe __netif_receive_skb_core,
+ * looks for packets on the loopback device and prints them
+ */
+ char devname[IFNAMSIZ];
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int len;
+
+ /* non-portable! works for the given kernel only */
+ bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
+ dev = _(skb->dev);
+ len = _(skb->len);
+
+ bpf_probe_read_kernel(devname, sizeof(devname), dev->name);
+
+ if (devname[0] == 'l' && devname[1] == 'o') {
+ char fmt[] = "skb %p len %d\n";
+ /* using bpf_trace_printk() for DEBUG ONLY */
+ bpf_trace_printk(fmt, sizeof(fmt), skb, len);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
new file mode 100644
index 000000000..9d4adb7fd
--- /dev/null
+++ b/samples/bpf/tracex1_user.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <unistd.h>
+#include <bpf/libbpf.h>
+#include "trace_helpers.h"
+
+int main(int ac, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ f = popen("taskset 1 ping -c5 localhost", "r");
+ (void) f;
+
+ read_trace_pipe();
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
new file mode 100644
index 000000000..5bc696bac
--- /dev/null
+++ b/samples/bpf/tracex2_kern.c
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "trace_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, long);
+ __type(value, long);
+ __uint(max_entries, 1024);
+} my_map SEC(".maps");
+
+/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
+ * example will no longer be meaningful
+ */
+SEC("kprobe/kfree_skb")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ long loc = 0;
+ long init_val = 1;
+ long *value;
+
+ /* read ip of kfree_skb caller.
+ * non-portable version of __builtin_return_address(0)
+ */
+ BPF_KPROBE_READ_RET_IP(loc, ctx);
+
+ value = bpf_map_lookup_elem(&my_map, &loc);
+ if (value)
+ *value += 1;
+ else
+ bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
+ return 0;
+}
+
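+/* Branchless integer log2: test progressively smaller bit halves and
+ * accumulate the shift count in r.
+ */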
+static unsigned int log2(unsigned int v)
+{
+ unsigned int r;
+ unsigned int shift;
+
+ r = (v > 0xFFFF) << 4; v >>= r;
+ shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+ shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+ shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+ r |= (v >> 1);
+ return r;
+}
+
+static unsigned int log2l(unsigned long v)
+{
+ unsigned int hi = v >> 32;
+ if (hi)
+ return log2(hi) + 32;
+ else
+ return log2(v);
+}
+
+struct hist_key {
+ char comm[16];
+ u64 pid_tgid;
+ u64 uid_gid;
+ u64 index;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(struct hist_key));
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, 1024);
+} my_hist_map SEC(".maps");
+
+SEC("kprobe/" SYSCALL(sys_write))
+int bpf_prog3(struct pt_regs *ctx)
+{
+ long write_size = PT_REGS_PARM3(ctx);
+ long init_val = 1;
+ long *value;
+ struct hist_key key;
+
+ key.index = log2l(write_size);
+ key.pid_tgid = bpf_get_current_pid_tgid();
+ key.uid_gid = bpf_get_current_uid_gid();
+ bpf_get_current_comm(&key.comm, sizeof(key.comm));
+
+ value = bpf_map_lookup_elem(&my_hist_map, &key);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+ else
+ bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
new file mode 100644
index 000000000..dd6205c6b
--- /dev/null
+++ b/samples/bpf/tracex2_user.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+
+#define MAX_INDEX 64
+#define MAX_STARS 38
+
+/* my_map, my_hist_map */
+static int map_fd[2];
+
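+/* Render val as a bar of '*' scaled against max, marking saturation
+ * with a trailing '+'.
+ */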
+static void stars(char *str, long val, long max, int width)
+{
+ int i;
+
+ for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
+ str[i] = '*';
+ if (val > max)
+ str[i - 1] = '+';
+ str[i] = '\0';
+}
+
+struct task {
+ char comm[16];
+ __u64 pid_tgid;
+ __u64 uid_gid;
+};
+
+struct hist_key {
+ struct task t;
+ __u32 index;
+};
+
+#define SIZE sizeof(struct task)
+
+static void print_hist_for_pid(int fd, void *task)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct hist_key key = {}, next_key;
+ long values[nr_cpus];
+ char starstr[MAX_STARS];
+ long value;
+ long data[MAX_INDEX] = {};
+ int max_ind = -1;
+ long max_value = 0;
+ int i, ind;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ if (memcmp(&next_key, task, SIZE)) {
+ key = next_key;
+ continue;
+ }
+ bpf_map_lookup_elem(fd, &next_key, values);
+ value = 0;
+ for (i = 0; i < nr_cpus; i++)
+ value += values[i];
+ ind = next_key.index;
+ data[ind] = value;
+ if (value && ind > max_ind)
+ max_ind = ind;
+ if (value > max_value)
+ max_value = value;
+ key = next_key;
+ }
+
+ printf(" syscall write() stats\n");
+ printf(" byte_size : count distribution\n");
+ for (i = 1; i <= max_ind + 1; i++) {
+ stars(starstr, data[i - 1], max_value, MAX_STARS);
+ printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
+ (1l << i) >> 1, (1l << i) - 1, data[i - 1],
+ MAX_STARS, starstr);
+ }
+}
+
+static void print_hist(int fd)
+{
+ struct hist_key key = {}, next_key;
+ static struct task tasks[1024];
+ int task_cnt = 0;
+ int i;
+
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ int found = 0;
+
+ for (i = 0; i < task_cnt; i++)
+ if (memcmp(&tasks[i], &next_key, SIZE) == 0)
+ found = 1;
+ if (!found)
+ memcpy(&tasks[task_cnt++], &next_key, SIZE);
+ key = next_key;
+ }
+
+ for (i = 0; i < task_cnt; i++) {
+ printf("\npid %d cmd %s uid %d\n",
+ (__u32) tasks[i].pid_tgid,
+ tasks[i].comm,
+ (__u32) tasks[i].uid_gid);
+ print_hist_for_pid(fd, &tasks[i]);
+ }
+
+}
+
+static void int_exit(int sig)
+{
+ print_hist(map_fd[1]);
+ exit(0);
+}
+
+int main(int ac, char **argv)
+{
+ long key, next_key, value;
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int i, j = 0;
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "my_map");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "my_hist_map");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ /* start 'ping' in the background to have some kfree_skb events */
+ f = popen("ping -4 -c5 localhost", "r");
+ (void) f;
+
+ /* start 'dd' in the background to have plenty of 'write' syscalls */
+ f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
+ (void) f;
+
+ bpf_object__for_each_program(prog, obj) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+
+ for (i = 0; i < 5; i++) {
+ key = 0;
+ while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
+ bpf_map_lookup_elem(map_fd[0], &next_key, &value);
+ printf("location 0x%lx count %ld\n", next_key, value);
+ key = next_key;
+ }
+ if (key)
+ printf("\n");
+ sleep(1);
+ }
+ print_hist(map_fd[1]);
+
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
new file mode 100644
index 000000000..af235bd66
--- /dev/null
+++ b/samples/bpf/tracex3_kern.c
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
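+/* The explicit _pad keeps sector 64-bit aligned so the key contains no
+ * compiler padding holes, which would make hash lookups unreliable.
+ */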
+struct start_key {
+ dev_t dev;
+ u32 _pad;
+ sector_t sector;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, long);
+ __type(value, u64);
+ __uint(max_entries, 4096);
+} my_map SEC(".maps");
+
+/* from /sys/kernel/tracing/events/block/block_io_start/format */
+SEC("tracepoint/block/block_io_start")
+int bpf_prog1(struct trace_event_raw_block_rq *ctx)
+{
+ u64 val = bpf_ktime_get_ns();
+ struct start_key key = {
+ .dev = ctx->dev,
+ .sector = ctx->sector
+ };
+
+ bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);
+ return 0;
+}
+
+static unsigned int log2l(unsigned long long n)
+{
+#define S(k) if (n >= (1ull << k)) { i += k; n >>= k; }
+ int i = -(n == 0);
+ S(32); S(16); S(8); S(4); S(2); S(1);
+ return i;
+#undef S
+}
+
+#define SLOTS 100
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+ __uint(max_entries, SLOTS);
+} lat_map SEC(".maps");
+
+/* from /sys/kernel/tracing/events/block/block_io_done/format */
+SEC("tracepoint/block/block_io_done")
+int bpf_prog2(struct trace_event_raw_block_rq *ctx)
+{
+ struct start_key key = {
+ .dev = ctx->dev,
+ .sector = ctx->sector
+ };
+
+ u64 *value, l, base;
+ u32 index;
+
+ value = bpf_map_lookup_elem(&my_map, &key);
+ if (!value)
+ return 0;
+
+ u64 cur_time = bpf_ktime_get_ns();
+ u64 delta = cur_time - *value;
+
+ bpf_map_delete_elem(&my_map, &key);
+
+ /* the lines below are computing index = log10(delta)*10
+ * using integer arithmetic
+ * index = 29 ~ 1 usec
+ * index = 59 ~ 1 msec
+ * index = 89 ~ 1 sec
+ * index = 99 ~ 10sec or more
+ * log10(x)*10 = log2(x)*10/log2(10) = log2(x)*3
+ */
+ l = log2l(delta);
+ base = 1ll << l;
+ index = (l * 64 + (delta - base) * 64 / base) * 3 / 64;
+
+ if (index >= SLOTS)
+ index = SLOTS - 1;
+
+ value = bpf_map_lookup_elem(&lat_map, &index);
+ if (value)
+ *value += 1;
+
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
new file mode 100644
index 000000000..d5eebace3
--- /dev/null
+++ b/samples/bpf/tracex3_user.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+
+#define SLOTS 100
+
+static void clear_stats(int fd)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ __u64 values[nr_cpus];
+ __u32 key;
+
+ memset(values, 0, sizeof(values));
+ for (key = 0; key < SLOTS; key++)
+ bpf_map_update_elem(fd, &key, values, BPF_ANY);
+}
+
+const char *color[] = {
+ "\033[48;5;255m",
+ "\033[48;5;252m",
+ "\033[48;5;250m",
+ "\033[48;5;248m",
+ "\033[48;5;246m",
+ "\033[48;5;244m",
+ "\033[48;5;242m",
+ "\033[48;5;240m",
+ "\033[48;5;238m",
+ "\033[48;5;236m",
+ "\033[48;5;234m",
+ "\033[48;5;232m",
+};
+const int num_colors = ARRAY_SIZE(color);
+
+const char nocolor[] = "\033[00m";
+
+const char *sym[] = {
+ " ",
+ " ",
+ ".",
+ ".",
+ "*",
+ "*",
+ "o",
+ "o",
+ "O",
+ "O",
+ "#",
+ "#",
+};
+
+bool full_range = false;
+bool text_only = false;
+
+static void print_banner(void)
+{
+ if (full_range)
+ printf("|1ns |10ns |100ns |1us |10us |100us"
+ " |1ms |10ms |100ms |1s |10s\n");
+ else
+ printf("|1us |10us |100us |1ms |10ms "
+ "|100ms |1s |10s\n");
+}
+
+static void print_hist(int fd)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ __u64 total_events = 0;
+ long values[nr_cpus];
+ __u64 max_cnt = 0;
+ __u64 cnt[SLOTS];
+ __u64 value;
+ __u32 key;
+ int i;
+
+ for (key = 0; key < SLOTS; key++) {
+ bpf_map_lookup_elem(fd, &key, values);
+ value = 0;
+ for (i = 0; i < nr_cpus; i++)
+ value += values[i];
+ cnt[key] = value;
+ total_events += value;
+ if (value > max_cnt)
+ max_cnt = value;
+ }
+ clear_stats(fd);
+ for (key = full_range ? 0 : 29; key < SLOTS; key++) {
+ int c = num_colors * cnt[key] / (max_cnt + 1);
+
+ if (text_only)
+ printf("%s", sym[c]);
+ else
+ printf("%s %s", color[c], nocolor);
+ }
+ printf(" # %lld\n", total_events);
+}
+
+int main(int ac, char **argv)
+{
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int map_fd, i, j = 0;
+
+ for (i = 1; i < ac; i++) {
+ if (strcmp(argv[i], "-a") == 0) {
+ full_range = true;
+ } else if (strcmp(argv[i], "-t") == 0) {
+ text_only = true;
+ } else if (strcmp(argv[i], "-h") == 0) {
+ printf("Usage:\n"
+ " -a display wider latency range\n"
+ " -t text only\n");
+ return 1;
+ }
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "lat_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+
+ printf(" heatmap of IO latency\n");
+ if (text_only)
+ printf(" %s", sym[num_colors - 1]);
+ else
+ printf(" %s %s", color[num_colors - 1], nocolor);
+ printf(" - many events with this latency\n");
+
+ if (text_only)
+ printf(" %s", sym[0]);
+ else
+ printf(" %s %s", color[0], nocolor);
+ printf(" - few events\n");
+
+ for (i = 0; ; i++) {
+ if (i % 20 == 0)
+ print_banner();
+ print_hist(map_fd);
+ sleep(2);
+ }
+
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
new file mode 100644
index 000000000..eb0f8fdd1
--- /dev/null
+++ b/samples/bpf/tracex4_kern.c
@@ -0,0 +1,55 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct pair {
+ u64 val;
+ u64 ip;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, long);
+ __type(value, struct pair);
+ __uint(max_entries, 1000000);
+} my_map SEC(".maps");
+
+/* kprobe is NOT a stable ABI. If kernel internals change, this bpf+kprobe
+ * example will no longer be meaningful.
+ */
+SEC("kprobe/kmem_cache_free")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ long ptr = PT_REGS_PARM2(ctx);
+
+ bpf_map_delete_elem(&my_map, &ptr);
+ return 0;
+}
+
+SEC("kretprobe/kmem_cache_alloc_node")
+int bpf_prog2(struct pt_regs *ctx)
+{
+ long ptr = PT_REGS_RC(ctx);
+ long ip = 0;
+
+ /* get ip address of kmem_cache_alloc_node() caller */
+ BPF_KRETPROBE_READ_RET_IP(ip, ctx);
+
+ struct pair v = {
+ .val = bpf_ktime_get_ns(),
+ .ip = ip,
+ };
+
+ bpf_map_update_elem(&my_map, &ptr, &v, BPF_ANY);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
new file mode 100644
index 000000000..227b05a0b
--- /dev/null
+++ b/samples/bpf/tracex4_user.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <time.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+struct pair {
+ long long val;
+ __u64 ip;
+};
+
+static __u64 time_get_ns(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static void print_old_objects(int fd)
+{
+ long long val = time_get_ns();
+ __u64 key, next_key;
+ struct pair v;
+
+	key = write(1, "\e[1;1H\e[2J", 10); /* clear screen (10-byte escape sequence) */
+
+ key = -1;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(fd, &next_key, &v);
+ key = next_key;
+ if (val - v.val < 1000000000ll)
+			/* skip objects allocated less than 1 sec ago */
+ continue;
+ printf("obj 0x%llx is %2lldsec old was allocated at ip %llx\n",
+ next_key, (val - v.val) / 1000000000ll, v.ip);
+ }
+}
+
+int main(int ac, char **argv)
+{
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int map_fd, i, j = 0;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+
+ for (i = 0; ; i++) {
+ print_old_objects(map_fd);
+ sleep(1);
+ }
+
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
new file mode 100644
index 000000000..64a1f7550
--- /dev/null
+++ b/samples/bpf/tracex5_kern.c
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/seccomp.h>
+#include <uapi/linux/unistd.h>
+#include "syscall_nrs.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
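+/* For example, PROG(SYS__NR_write) expands (after syscall_nrs.h resolves
+ * the syscall number, e.g. 1 on x86_64) to:
+ *
+ *	SEC("kprobe/1") int bpf_func_SYS__NR_write
+ *
+ * The numeric section name lets tracex5_user.c sscanf() the syscall
+ * number and install the program into the matching PROG_ARRAY slot for
+ * bpf_tail_call() below.
+ */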
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+#ifdef __mips__
+ __uint(max_entries, 6000); /* MIPS n64 syscalls start at 5000 */
+#else
+ __uint(max_entries, 1024);
+#endif
+} progs SEC(".maps");
+
+SEC("kprobe/__seccomp_filter")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ int sc_nr = (int)PT_REGS_PARM1(ctx);
+
+ /* dispatch into next BPF program depending on syscall number */
+ bpf_tail_call(ctx, &progs, sc_nr);
+
+ /* fall through -> unknown syscall */
+ if (sc_nr >= __NR_getuid && sc_nr <= __NR_getsid) {
+ char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
+ bpf_trace_printk(fmt, sizeof(fmt), sc_nr);
+ }
+ return 0;
+}
+
+/* we jump here when syscall number == __NR_write */
+PROG(SYS__NR_write)(struct pt_regs *ctx)
+{
+ struct seccomp_data sd;
+
+ bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+ if (sd.args[2] == 512) {
+ char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
+ bpf_trace_printk(fmt, sizeof(fmt),
+ sd.args[0], sd.args[1], sd.args[2]);
+ }
+ return 0;
+}
+
+PROG(SYS__NR_read)(struct pt_regs *ctx)
+{
+ struct seccomp_data sd;
+
+ bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+ if (sd.args[2] > 128 && sd.args[2] <= 1024) {
+ char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
+ bpf_trace_printk(fmt, sizeof(fmt),
+ sd.args[0], sd.args[1], sd.args[2]);
+ }
+ return 0;
+}
+
+#ifdef __NR_mmap2
+PROG(SYS__NR_mmap2)(struct pt_regs *ctx)
+{
+ char fmt[] = "mmap2\n";
+
+ bpf_trace_printk(fmt, sizeof(fmt));
+ return 0;
+}
+#endif
+
+#ifdef __NR_mmap
+PROG(SYS__NR_mmap)(struct pt_regs *ctx)
+{
+ char fmt[] = "mmap\n";
+
+ bpf_trace_printk(fmt, sizeof(fmt));
+ return 0;
+}
+#endif
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
new file mode 100644
index 000000000..9d7d79f0d
--- /dev/null
+++ b/samples/bpf/tracex5_user.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "trace_helpers.h"
+#include "bpf_util.h"
+
+#ifdef __mips__
+#define MAX_ENTRIES 6000 /* MIPS n64 syscalls start at 5000 */
+#else
+#define MAX_ENTRIES 1024
+#endif
+
+/* Install a fake seccomp program to enable the seccomp code path inside the
+ * kernel, so that our kprobe attached to __seccomp_filter() can be triggered.
+ */
+static void install_accept_all_seccomp(void)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog prog = {
+ .len = (unsigned short)ARRAY_SIZE(filter),
+ .filter = filter,
+ };
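+	/* The 2 below is SECCOMP_MODE_FILTER; the classic-BPF filter above
+	 * returns SECCOMP_RET_ALLOW for everything, so no syscall is
+	 * actually blocked - it only exercises the seccomp code path.
+	 */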
+ if (prctl(PR_SET_SECCOMP, 2, &prog))
+ perror("prctl");
+}
+
+int main(int ac, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int key, fd, progs_fd;
+ const char *section;
+ char filename[256];
+ FILE *f;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ progs_fd = bpf_object__find_map_fd_by_name(obj, "progs");
+ if (progs_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ section = bpf_program__section_name(prog);
+		/* register only the syscall programs in the PROG_ARRAY */
+ if (sscanf(section, "kprobe/%d", &key) != 1)
+ continue;
+
+ fd = bpf_program__fd(prog);
+ bpf_map_update_elem(progs_fd, &key, &fd, BPF_ANY);
+ }
+
+ install_accept_all_seccomp();
+
+ f = popen("dd if=/dev/zero of=/dev/null count=5", "r");
+ (void) f;
+
+ read_trace_pipe();
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644
index 000000000..fd602c277
--- /dev/null
+++ b/samples/bpf/tracex6_kern.c
@@ -0,0 +1,82 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 64);
+} counters SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, u64);
+ __uint(max_entries, 64);
+} values SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct bpf_perf_event_value);
+ __uint(max_entries, 64);
+} values2 SEC(".maps");
+
+SEC("kprobe/htab_map_get_next_key")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ u32 key = bpf_get_smp_processor_id();
+ u64 count, *val;
+ s64 error;
+
+ count = bpf_perf_event_read(&counters, key);
+ error = (s64)count;
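+	/* bpf_perf_event_read() encodes a negative errno in the returned
+	 * value on failure; -2..-22 covers -ENOENT..-EINVAL.
+	 */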
+ if (error <= -2 && error >= -22)
+ return 0;
+
+ val = bpf_map_lookup_elem(&values, &key);
+ if (val)
+ *val = count;
+ else
+ bpf_map_update_elem(&values, &key, &count, BPF_NOEXIST);
+
+ return 0;
+}
+
+/*
+ * Since *_map_lookup_elem can't be expected to trigger bpf programs
+ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
+ * program is attached to bpf_map_copy_value (which is called from
+ * map_lookup_elem) and acts only on hashtable-type maps.
+ */
+SEC("kprobe/bpf_map_copy_value")
+int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
+{
+ u32 key = bpf_get_smp_processor_id();
+ struct bpf_perf_event_value *val, buf;
+ enum bpf_map_type type;
+ int error;
+
+ type = BPF_CORE_READ(map, map_type);
+ if (type != BPF_MAP_TYPE_HASH)
+ return 0;
+
+ error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
+ if (error)
+ return 0;
+
+ val = bpf_map_lookup_elem(&values2, &key);
+ if (val)
+ *val = buf;
+ else
+ bpf_map_update_elem(&values2, &key, &buf, BPF_NOEXIST);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644
index 000000000..8e83bf2a8
--- /dev/null
+++ b/samples/bpf/tracex6_user.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <fcntl.h>
+#include <linux/perf_event.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "perf-sys.h"
+
+#define SAMPLE_PERIOD 0x7fffffffffffffffULL
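+/* A sample period this large effectively never fires, so each perf
+ * event behaves as a pure counter that the BPF programs can read via
+ * bpf_perf_event_read()/bpf_perf_event_read_value().
+ */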
+
+/* counters, values, values2 */
+static int map_fd[3];
+
+static void check_on_cpu(int cpu, struct perf_event_attr *attr)
+{
+ struct bpf_perf_event_value value2;
+ int pmu_fd, error = 0;
+ cpu_set_t set;
+ __u64 value;
+
+ /* Move to target CPU */
+ CPU_ZERO(&set);
+ CPU_SET(cpu, &set);
+ assert(sched_setaffinity(0, sizeof(set), &set) == 0);
+ /* Open perf event and attach to the perf_event_array */
+ pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
+ error = 1;
+ goto on_exit;
+ }
+ assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
+ /* Trigger the kprobe */
+ bpf_map_get_next_key(map_fd[1], &cpu, NULL);
+ /* Check the value */
+ if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
+ fprintf(stderr, "Value missing for CPU %d\n", cpu);
+ error = 1;
+ goto on_exit;
+ } else {
+ fprintf(stderr, "CPU %d: %llu\n", cpu, value);
+ }
+ /* The above bpf_map_lookup_elem should trigger the second kprobe */
+ if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
+ fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
+ error = 1;
+ goto on_exit;
+ } else {
+ fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
+ value2.counter, value2.enabled, value2.running);
+ }
+
+on_exit:
+ assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
+ assert(close(pmu_fd) == 0 || error);
+ assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
+ exit(error);
+}
+
+static void test_perf_event_array(struct perf_event_attr *attr,
+ const char *name)
+{
+ int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ pid_t pid[nr_cpus];
+ int err = 0;
+
+ printf("Test reading %s counters\n", name);
+
+ for (i = 0; i < nr_cpus; i++) {
+ pid[i] = fork();
+ assert(pid[i] >= 0);
+ if (pid[i] == 0) {
+ check_on_cpu(i, attr);
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ assert(waitpid(pid[i], &status, 0) == pid[i]);
+ err |= status;
+ }
+
+ if (err)
+ printf("Test: %s FAILED\n", name);
+}
+
+static void test_bpf_perf_event(void)
+{
+ struct perf_event_attr attr_cycles = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HARDWARE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_event_attr attr_clock = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_SOFTWARE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+ struct perf_event_attr attr_raw = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_RAW,
+ .read_format = 0,
+ .sample_type = 0,
+ /* Intel Instruction Retired */
+ .config = 0xc0,
+ };
+ struct perf_event_attr attr_l1d_load = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HW_CACHE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ };
+ struct perf_event_attr attr_llc_miss = {
+ .freq = 0,
+ .sample_period = SAMPLE_PERIOD,
+ .inherit = 0,
+ .type = PERF_TYPE_HW_CACHE,
+ .read_format = 0,
+ .sample_type = 0,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ };
+ struct perf_event_attr attr_msr_tsc = {
+ .freq = 0,
+ .sample_period = 0,
+ .inherit = 0,
+ /* From /sys/bus/event_source/devices/msr/ */
+ .type = 7,
+ .read_format = 0,
+ .sample_type = 0,
+ .config = 0,
+ };
+
+ test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
+ test_perf_event_array(&attr_clock, "SOFTWARE-clock");
+ test_perf_event_array(&attr_raw, "RAW-instruction-retired");
+ test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
+
+	/* the tests below may fail in qemu */
+ test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
+ test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ int i = 0;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counters");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "values");
+ map_fd[2] = bpf_object__find_map_fd_by_name(obj, "values2");
+ if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
+ }
+
+ test_bpf_perf_event();
+
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
+ return 0;
+}
diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7_kern.c
new file mode 100644
index 000000000..c5a92df8a
--- /dev/null
+++ b/samples/bpf/tracex7_kern.c
@@ -0,0 +1,16 @@
+#include <uapi/linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("kprobe/open_ctree")
+int bpf_prog1(struct pt_regs *ctx)
+{
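+	/* -12 is -ENOMEM; bpf_override_return() forces open_ctree() to fail
+	 * with it. This needs CONFIG_BPF_KPROBE_OVERRIDE and works only on
+	 * functions annotated with ALLOW_ERROR_INJECTION, like open_ctree().
+	 */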
+ unsigned long rc = -12;
+
+ bpf_override_return(ctx, rc);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
new file mode 100644
index 000000000..8be7ce18d
--- /dev/null
+++ b/samples/bpf/tracex7_user.c
@@ -0,0 +1,56 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <unistd.h>
+#include <bpf/libbpf.h>
+
+int main(int argc, char **argv)
+{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ char filename[256];
+ char command[256];
+ int ret = 0;
+ FILE *f;
+
+ if (!argv[1]) {
+ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
+ return 0;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
+ }
+
+ snprintf(command, 256, "mount %s tmpmnt/", argv[1]);
+ f = popen(command, "r");
+ ret = pclose(f);
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
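+	/* The mount is expected to fail because open_ctree() was forced to
+	 * return -ENOMEM, so a nonzero pclose() status means the error
+	 * injection worked.
+	 */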
+ return ret ? 0 : 1;
+}
diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
new file mode 100644
index 000000000..d91f27cbc
--- /dev/null
+++ b/samples/bpf/xdp1_kern.c
@@ -0,0 +1,100 @@
+/* Copyright (c) 2016 PLUMgrid
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
+
+static int parse_ipv4(void *data, u64 nh_off, void *data_end)
+{
+ struct iphdr *iph = data + nh_off;
+
+ if (iph + 1 > data_end)
+ return 0;
+ return iph->protocol;
+}
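+/* Classic XDP bounds-check idiom: 'iph + 1' points one past the header,
+ * so 'iph + 1 > data_end' proves the whole struct iphdr fits in the
+ * buffer before any field is read. parse_ipv6() below uses the same
+ * pattern.
+ */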
+
+static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+{
+ struct ipv6hdr *ip6h = data + nh_off;
+
+ if (ip6h + 1 > data_end)
+ return 0;
+ return ip6h->nexthdr;
+}
+
+#define XDPBUFSIZE 60
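+/* With SEC("xdp.frags") the frame may be multi-buffer (non-linear), so
+ * instead of dereferencing ctx->data directly, the first XDPBUFSIZE
+ * bytes are copied into a stack buffer via bpf_xdp_load_bytes() and
+ * parsed there.
+ */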
+SEC("xdp.frags")
+int xdp_prog1(struct xdp_md *ctx)
+{
+ __u8 pkt[XDPBUFSIZE] = {};
+	void *data_end = pkt + sizeof(pkt); /* one past the last byte */
+ void *data = pkt;
+ struct ethhdr *eth = data;
+ int rc = XDP_DROP;
+ long *value;
+ u16 h_proto;
+ u64 nh_off;
+ u32 ipproto;
+
+ if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return rc;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return rc;
+
+ h_proto = eth->h_proto;
+
+ /* Handle VLAN tagged packet */
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return rc;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+ /* Handle double VLAN tagged packet */
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return rc;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+
+ if (h_proto == htons(ETH_P_IP))
+ ipproto = parse_ipv4(data, nh_off, data_end);
+ else if (h_proto == htons(ETH_P_IPV6))
+ ipproto = parse_ipv6(data, nh_off, data_end);
+ else
+ ipproto = 0;
+
+ value = bpf_map_lookup_elem(&rxcnt, &ipproto);
+ if (value)
+ *value += 1;
+
+ return rc;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
new file mode 100644
index 000000000..281dc964d
--- /dev/null
+++ b/samples/bpf/xdp1_user.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 PLUMgrid
+ */
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <net/if.h>
+
+#include "bpf_util.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static int ifindex;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
+
+static void int_exit(int sig)
+{
+ __u32 curr_prog_id = 0;
+
+ if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
+ printf("bpf_xdp_query_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
+ else if (!curr_prog_id)
+		printf("couldn't find a prog id on the given interface\n");
+ else
+ printf("program on interface changed, not removing\n");
+ exit(0);
+}
+
+/* simple per-protocol drop counter
+ */
+static void poll_stats(int map_fd, int interval)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ __u64 values[nr_cpus], prev[UINT8_MAX] = { 0 };
+ int i;
+
+ while (1) {
+ __u32 key = UINT32_MAX;
+
+ sleep(interval);
+
+ while (bpf_map_get_next_key(map_fd, &key, &key) == 0) {
+ __u64 sum = 0;
+
+ assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
+ for (i = 0; i < nr_cpus; i++)
+ sum += values[i];
+ if (sum > prev[key])
+ printf("proto %u: %10llu pkt/s\n",
+ key, (sum - prev[key]) / interval);
+ prev[key] = sum;
+ }
+ }
+}
+
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "usage: %s [OPTS] IFACE\n\n"
+ "OPTS:\n"
+ " -S use skb-mode\n"
+ " -N enforce native mode\n"
+ " -F force loading prog\n",
+ prog);
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const char *optstr = "FSN";
+ int prog_fd, map_fd, opt;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ char filename[256];
+ int err;
+
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+ switch (opt) {
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'N':
+ /* default, set below */
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ if (optind == argc) {
+ usage(basename(argv[0]));
+ return 1;
+ }
+
+ ifindex = if_nametoindex(argv[optind]);
+ if (!ifindex) {
+ perror("if_nametoindex");
+ return 1;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return 1;
+
+ prog_fd = bpf_program__fd(prog);
+
+ map = bpf_object__next_map(obj, NULL);
+ if (!map) {
+ printf("finding a map in obj file failed\n");
+ return 1;
+ }
+ map_fd = bpf_map__fd(map);
+
+	if (prog_fd < 0) {
+		printf("bpf_program__fd failed: %s\n", strerror(errno));
+ return 1;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
+ printf("link set xdp fd failed\n");
+ return 1;
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
+ poll_stats(map_fd, 1);
+
+ return 0;
+}
diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
new file mode 100644
index 000000000..8bca67445
--- /dev/null
+++ b/samples/bpf/xdp2_kern.c
@@ -0,0 +1,125 @@
+/* Copyright (c) 2016 PLUMgrid
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, long);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
+
+static void swap_src_dst_mac(void *data)
+{
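+	/* The two 6-byte MAC addresses at the start of the Ethernet header
+	 * are treated as six 16-bit words: p[0..2] is the destination,
+	 * p[3..5] the source.
+	 */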
+ unsigned short *p = data;
+ unsigned short dst[3];
+
+ dst[0] = p[0];
+ dst[1] = p[1];
+ dst[2] = p[2];
+ p[0] = p[3];
+ p[1] = p[4];
+ p[2] = p[5];
+ p[3] = dst[0];
+ p[4] = dst[1];
+ p[5] = dst[2];
+}
+
+static int parse_ipv4(void *data, u64 nh_off, void *data_end)
+{
+ struct iphdr *iph = data + nh_off;
+
+ if (iph + 1 > data_end)
+ return 0;
+ return iph->protocol;
+}
+
+static int parse_ipv6(void *data, u64 nh_off, void *data_end)
+{
+ struct ipv6hdr *ip6h = data + nh_off;
+
+ if (ip6h + 1 > data_end)
+ return 0;
+ return ip6h->nexthdr;
+}
+
+#define XDPBUFSIZE 60
+SEC("xdp.frags")
+int xdp_prog1(struct xdp_md *ctx)
+{
+ __u8 pkt[XDPBUFSIZE] = {};
+	void *data_end = pkt + sizeof(pkt); /* one past the last byte */
+ void *data = pkt;
+ struct ethhdr *eth = data;
+ int rc = XDP_DROP;
+ long *value;
+ u16 h_proto;
+ u64 nh_off;
+ u32 ipproto;
+
+ if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return rc;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return rc;
+
+ h_proto = eth->h_proto;
+
+ /* Handle VLAN tagged packet */
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return rc;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+ /* Handle double VLAN tagged packet */
+ if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ return rc;
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+
+ if (h_proto == htons(ETH_P_IP))
+ ipproto = parse_ipv4(data, nh_off, data_end);
+ else if (h_proto == htons(ETH_P_IPV6))
+ ipproto = parse_ipv6(data, nh_off, data_end);
+ else
+ ipproto = 0;
+
+ value = bpf_map_lookup_elem(&rxcnt, &ipproto);
+ if (value)
+ *value += 1;
+
+ if (ipproto == IPPROTO_UDP) {
+ swap_src_dst_mac(data);
+
+ if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return rc;
+
+ rc = XDP_TX;
+ }
+
+ return rc;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
new file mode 100755
index 000000000..4bde9d066
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+#
+# Bash-shell example of using the iproute2 tools 'tc' and 'ip' to load
+# eBPF programs, both for XDP and clsbpf. Shell-script function
+# wrappers and even long-options parsing are illustrated, for ease of
+# use.
+#
+# Related to samples/bpf/xdp2skb_meta_kern.c, which contains BPF progs
+# that need to collaborate between the XDP and TC hooks. Thus, it is
+# convenient that the same tool loads both programs that need to work
+# together.
+#
+BPF_FILE=xdp2skb_meta_kern.o
+DIR=$(dirname $0)
+
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
+
+function usage() {
+ echo ""
+ echo "Usage: $0 [-vfh] --dev ethX"
+ echo " -d | --dev : Network device (required)"
+ echo " --flush : Cleanup flush TC and XDP progs"
+ echo " --list : (\$LIST) List TC and XDP progs"
+ echo " -v | --verbose : (\$VERBOSE) Verbose"
+ echo " --dry-run : (\$DRYRUN) Dry-run only (echo commands)"
+ echo ""
+}
+
+## -- General shell logging cmds --
+function err() {
+ local exitcode=$1
+ shift
+ echo "ERROR: $@" >&2
+ exit $exitcode
+}
+
+function info() {
+ if [[ -n "$VERBOSE" ]]; then
+ echo "# $@"
+ fi
+}
+
+## -- Helper function calls --
+
+# Wrapper call for TC and IP
+# - Will display the offending command on failure
+function _call_cmd() {
+ local cmd="$1"
+ local allow_fail="$2"
+ shift 2
+ if [[ -n "$VERBOSE" ]]; then
+ echo "$cmd $@"
+ fi
+ if [[ -n "$DRYRUN" ]]; then
+ return
+ fi
+ $cmd "$@"
+ local status=$?
+ if (( $status != 0 )); then
+ if [[ "$allow_fail" == "" ]]; then
+ err 2 "Exec error($status) occurred cmd: \"$cmd $@\""
+ fi
+ fi
+}
+function call_tc() {
+ _call_cmd "$TC" "" "$@"
+}
+function call_tc_allow_fail() {
+ _call_cmd "$TC" "allow_fail" "$@"
+}
+function call_ip() {
+ _call_cmd "$IP" "" "$@"
+}
+
+## --- Parse command line arguments / parameters ---
+# Using external program "getopt" to get --long-options
+OPTIONS=$(getopt -o vfhd: \
+ --long verbose,flush,help,list,dev:,dry-run -- "$@")
+if (( $? != 0 )); then
+ err 4 "Error calling getopt"
+fi
+eval set -- "$OPTIONS"
+
+unset DEV
+unset FLUSH
+while true; do
+ case "$1" in
+ -d | --dev ) # device
+ DEV=$2
+ info "Device set to: DEV=$DEV" >&2
+ shift 2
+ ;;
+ -v | --verbose)
+ VERBOSE=yes
+ # info "Verbose mode: VERBOSE=$VERBOSE" >&2
+ shift
+ ;;
+ --dry-run )
+ DRYRUN=yes
+ VERBOSE=yes
+ info "Dry-run mode: enable VERBOSE and don't call TC+IP" >&2
+ shift
+ ;;
+ -f | --flush )
+ FLUSH=yes
+ shift
+ ;;
+ --list )
+ LIST=yes
+ shift
+ ;;
+ -- )
+ shift
+ break
+ ;;
+ -h | --help )
+ usage;
+ exit 0
+ ;;
+ * )
+ shift
+ break
+ ;;
+ esac
+done
+
+FILE="$DIR/$BPF_FILE"
+if [[ ! -e $FILE ]]; then
+ err 3 "Missing BPF object file ($FILE)"
+fi
+
+if [[ -z $DEV ]]; then
+ usage
+ err 2 "Please specify network device -- required option --dev"
+fi
+
+## -- Function calls --
+
+function list_tc()
+{
+ local device="$1"
+ shift
+ info "Listing current TC ingress rules"
+ call_tc filter show dev $device ingress
+}
+
+function list_xdp()
+{
+ local device="$1"
+ shift
+ info "Listing current XDP device($device) setting"
+ call_ip link show dev $device | grep --color=auto xdp
+}
+
+function flush_tc()
+{
+ local device="$1"
+ shift
+ info "Flush TC on device: $device"
+ call_tc_allow_fail filter del dev $device ingress
+ call_tc_allow_fail qdisc del dev $device clsact
+}
+
+function flush_xdp()
+{
+ local device="$1"
+ shift
+ info "Flush XDP on device: $device"
+ call_ip link set dev $device xdp off
+}
+
+function attach_tc_mark()
+{
+ local device="$1"
+ local file="$2"
+ local prog="tc_mark"
+ shift 2
+
+	# Re-attach clsact to clear/flush existing rules
+ call_tc_allow_fail qdisc del dev $device clsact 2> /dev/null
+ call_tc qdisc add dev $device clsact
+
+ # Attach BPF prog
+ call_tc filter add dev $device ingress \
+ prio 1 handle 1 bpf da obj $file sec $prog
+}
+
+function attach_xdp_mark()
+{
+ local device="$1"
+ local file="$2"
+ local prog="xdp_mark"
+ shift 2
+
+	# Remove XDP prog in case it's already loaded
+ # TODO: Need ip-link option to override/replace existing XDP prog
+ flush_xdp $device
+
+ # Attach XDP/BPF prog
+ call_ip link set dev $device xdp obj $file sec $prog
+}
+
+if [[ -n $FLUSH ]]; then
+ flush_tc $DEV
+ flush_xdp $DEV
+ exit 0
+fi
+
+if [[ -n $LIST ]]; then
+ list_tc $DEV
+ list_xdp $DEV
+ exit 0
+fi
+
+attach_tc_mark $DEV $FILE
+attach_xdp_mark $DEV $FILE
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
new file mode 100644
index 000000000..d5631014a
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to transfer info from XDP to SKB, e.g. skb->mark
+ * -----------------------------------------------------------
+ * This uses the XDP data_meta infrastructure, and is a cooperation
+ * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
+ *
+ * Notice: This example does not use the BPF C-loader,
+ * but instead relies on the iproute2 TC tool for loading BPF-objects.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/pkt_cls.h>
+
+#include <bpf/bpf_helpers.h>
+
+/*
+ * This struct is stored in the XDP 'data_meta' area, which is located
+ * just in front of the raw packet payload data. The meaning is
+ * specific to these two BPF programs, which use it as a communication
+ * channel. XDP adjusts/increases the area via a bpf-helper, and TC uses
+ * boundary checks to see if data has been provided.
+ *
+ * The struct must be 4-byte aligned, which here is enforced by the
+ * struct's __attribute__((aligned(4))).
+ */
+struct meta_info {
+ __u32 mark;
+} __attribute__((aligned(4)));
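+/* A compile-time check of that alignment requirement could look like
+ * this sketch (not part of the original sample):
+ *
+ *	_Static_assert(__alignof__(struct meta_info) == 4, "meta_info");
+ */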
+
+SEC("xdp_mark")
+int _xdp_mark(struct xdp_md *ctx)
+{
+ struct meta_info *meta;
+ void *data, *data_end;
+ int ret;
+
+ /* Reserve space in-front of data pointer for our meta info.
+ * (Notice drivers not supporting data_meta will fail here!)
+ */
+ ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
+ if (ret < 0)
+ return XDP_ABORTED;
+
+ /* Notice: Kernel-side verifier requires that loading of
+ * ctx->data MUST happen _after_ helper bpf_xdp_adjust_meta(),
+ * as pkt-data pointers are invalidated. Helpers that require
+ * this are determined/marked by bpf_helper_changes_pkt_data()
+ */
+ data = (void *)(unsigned long)ctx->data;
+
+	/* Check that data_meta has room for the meta_info struct */
+ meta = (void *)(unsigned long)ctx->data_meta;
+ if (meta + 1 > data)
+ return XDP_ABORTED;
+
+ meta->mark = 42;
+
+ return XDP_PASS;
+}
+
+SEC("tc_mark")
+int _tc_mark(struct __sk_buff *ctx)
+{
+ void *data = (void *)(unsigned long)ctx->data;
+ void *data_end = (void *)(unsigned long)ctx->data_end;
+ void *data_meta = (void *)(unsigned long)ctx->data_meta;
+ struct meta_info *meta = data_meta;
+
+ /* Check XDP gave us some data_meta */
+ if (meta + 1 > data) {
+ ctx->mark = 41;
+		/* No data_meta from XDP; just "accept" the packet
+		 * (mark 41 signals that the meta info was missing)
+		 */
+ return TC_ACT_OK;
+ }
+
+ /* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
+ ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */
+
+ return TC_ACT_OK;
+}
+
+/* Manually attaching these programs:
+export DEV=ixgbe2
+export FILE=xdp2skb_meta_kern.o
+
+# via TC command
+tc qdisc del dev $DEV clsact 2> /dev/null
+tc qdisc add dev $DEV clsact
+tc filter add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
+tc filter show dev $DEV ingress
+
+# XDP via IP command:
+ip link set dev $DEV xdp off
+ip link set dev $DEV xdp obj $FILE sec xdp_mark
+
+# Use iptable to "see" if SKBs are marked
+iptables -I INPUT -p icmp -m mark --mark 41 # == 0x29
+iptables -I INPUT -p icmp -m mark --mark 42 # == 0x2a
+
+# Hint: catch XDP_ABORTED errors via
+perf record -e xdp:*
+perf script
+
+*/
diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c
new file mode 100644
index 000000000..ffdd54862
--- /dev/null
+++ b/samples/bpf/xdp_adjust_tail_kern.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program shows how to use bpf_xdp_adjust_tail() by
+ * generating an ICMPv4 "packet too big" message (more precisely, for
+ * v4: destination unreachable with code "fragmentation needed and DF
+ * set") when receiving packets bigger than 600 bytes.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <bpf/bpf_helpers.h>
+
+#define DEFAULT_TTL 64
+#define MAX_PCKT_SIZE 600
+#define ICMP_TOOBIG_SIZE 98
+#define ICMP_TOOBIG_PAYLOAD_SIZE 92
+
+/* volatile to prevent compiler optimizations */
+static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 1);
+} icmpcnt SEC(".maps");
+
+static __always_inline void count_icmp(void)
+{
+ u64 key = 0;
+ u64 *icmp_count;
+
+ icmp_count = bpf_map_lookup_elem(&icmpcnt, &key);
+ if (icmp_count)
+ *icmp_count += 1;
+}
+
+static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth)
+{
+ struct ethhdr *eth;
+
+ eth = data;
+ memcpy(eth->h_source, orig_eth->h_dest, ETH_ALEN);
+ memcpy(eth->h_dest, orig_eth->h_source, ETH_ALEN);
+ eth->h_proto = orig_eth->h_proto;
+}
+
+static __always_inline __u16 csum_fold_helper(__u32 csum)
+{
+ return ~((csum & 0xffff) + (csum >> 16));
+}
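+/* bpf_csum_diff() returns a 32-bit one's-complement running sum;
+ * csum_fold_helper() folds the carries from the upper 16 bits into the
+ * lower 16 bits and inverts the result, producing the final 16-bit
+ * internet checksum.
+ */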
+
+static __always_inline void ipv4_csum(void *data_start, int data_size,
+ __u32 *csum)
+{
+ *csum = bpf_csum_diff(0, 0, data_start, data_size, *csum);
+ *csum = csum_fold_helper(*csum);
+}
+
+static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
+{
+ int headroom = (int)sizeof(struct iphdr) + (int)sizeof(struct icmphdr);
+
+ if (bpf_xdp_adjust_head(xdp, 0 - headroom))
+ return XDP_DROP;
+ void *data = (void *)(long)xdp->data;
+ void *data_end = (void *)(long)xdp->data_end;
+
+ if (data + (ICMP_TOOBIG_SIZE + headroom) > data_end)
+ return XDP_DROP;
+
+ struct iphdr *iph, *orig_iph;
+ struct icmphdr *icmp_hdr;
+ struct ethhdr *orig_eth;
+ __u32 csum = 0;
+ __u64 off = 0;
+
+ orig_eth = data + headroom;
+ swap_mac(data, orig_eth);
+ off += sizeof(struct ethhdr);
+ iph = data + off;
+ off += sizeof(struct iphdr);
+ icmp_hdr = data + off;
+ off += sizeof(struct icmphdr);
+ orig_iph = data + off;
+ icmp_hdr->type = ICMP_DEST_UNREACH;
+ icmp_hdr->code = ICMP_FRAG_NEEDED;
+ icmp_hdr->un.frag.mtu = htons(max_pcktsz - sizeof(struct ethhdr));
+ icmp_hdr->checksum = 0;
+ ipv4_csum(icmp_hdr, ICMP_TOOBIG_PAYLOAD_SIZE, &csum);
+ icmp_hdr->checksum = csum;
+ iph->ttl = DEFAULT_TTL;
+ iph->daddr = orig_iph->saddr;
+ iph->saddr = orig_iph->daddr;
+ iph->version = 4;
+ iph->ihl = 5;
+ iph->protocol = IPPROTO_ICMP;
+ iph->tos = 0;
+ iph->tot_len = htons(
+ ICMP_TOOBIG_SIZE + headroom - sizeof(struct ethhdr));
+ iph->check = 0;
+ csum = 0;
+ ipv4_csum(iph, sizeof(struct iphdr), &csum);
+ iph->check = csum;
+ count_icmp();
+ return XDP_TX;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ int pckt_size = data_end - data;
+ int offset;
+
+ if (pckt_size > max(max_pcktsz, ICMP_TOOBIG_SIZE)) {
+ offset = pckt_size - ICMP_TOOBIG_SIZE;
+ if (bpf_xdp_adjust_tail(xdp, 0 - offset))
+ return XDP_PASS;
+ return send_icmp4_too_big(xdp);
+ }
+ return XDP_PASS;
+}
+
+SEC("xdp_icmp")
+int _xdp_icmp(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ __u16 h_proto;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ h_proto = eth->h_proto;
+
+ if (h_proto == htons(ETH_P_IP))
+ return handle_ipv4(xdp);
+ else
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_adjust_tail_user.c b/samples/bpf/xdp_adjust_tail_user.c
new file mode 100644
index 000000000..167646077
--- /dev/null
+++ b/samples/bpf/xdp_adjust_tail_user.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <netinet/ether.h>
+#include <unistd.h>
+#include <time.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define STATS_INTERVAL_S 2U
+#define MAX_PCKT_SIZE 600
+
+static int ifindex = -1;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
+
+static void int_exit(int sig)
+{
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
+ printf("bpf_xdp_query_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
+ else if (!curr_prog_id)
+			printf("couldn't find a prog id on the given iface\n");
+ else
+ printf("program on interface changed, not removing\n");
+ }
+ exit(0);
+}
+
+/* simple "icmp packet too big sent" counter
+ */
+static void poll_stats(unsigned int map_fd, unsigned int kill_after_s)
+{
+ time_t started_at = time(NULL);
+ __u64 value = 0;
+ int key = 0;
+
+ while (!kill_after_s || time(NULL) - started_at <= kill_after_s) {
+ sleep(STATS_INTERVAL_S);
+
+ assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
+
+ printf("icmp \"packet too big\" sent: %10llu pkts\n", value);
+ }
+}
+
+static void usage(const char *cmd)
+{
+	printf("Start an XDP prog which sends ICMP \"packet too big\"\n"
+	       "messages if an ingress packet is bigger than MAX_SIZE bytes\n");
+ printf("Usage: %s [...]\n", cmd);
+ printf(" -i <ifname|ifindex> Interface\n");
+ printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
+ printf(" -P <MAX_PCKT_SIZE> Default: %u\n", MAX_PCKT_SIZE);
+ printf(" -S use skb-mode\n");
+ printf(" -N enforce native mode\n");
+ printf(" -F force loading prog\n");
+ printf(" -h Display this help\n");
+}
+
+int main(int argc, char **argv)
+{
+ unsigned char opt_flags[256] = {};
+ const char *optstr = "i:T:P:SNFh";
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ unsigned int kill_after_s = 0;
+ int i, prog_fd, map_fd, opt;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 max_pckt_size = 0;
+ __u32 key = 0;
+ char filename[256];
+ int err;
+
+ for (i = 0; i < strlen(optstr); i++)
+ if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z')
+ opt_flags[(unsigned char)optstr[i]] = 1;
+
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+
+ switch (opt) {
+ case 'i':
+ ifindex = if_nametoindex(optarg);
+ if (!ifindex)
+ ifindex = atoi(optarg);
+ break;
+ case 'T':
+ kill_after_s = atoi(optarg);
+ break;
+ case 'P':
+ max_pckt_size = atoi(optarg);
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'N':
+ /* default, set below */
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ opt_flags[opt] = 0;
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ for (i = 0; i < strlen(optstr); i++) {
+ if (opt_flags[(unsigned int)optstr[i]]) {
+ fprintf(stderr, "Missing argument -%c\n", optstr[i]);
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (!ifindex) {
+ fprintf(stderr, "Invalid ifname\n");
+ return 1;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return 1;
+
+ prog_fd = bpf_program__fd(prog);
+
+ /* static global var 'max_pcktsz' is accessible from .data section */
+ if (max_pckt_size) {
+ map_fd = bpf_object__find_map_fd_by_name(obj, "xdp_adju.data");
+ if (map_fd < 0) {
+ printf("finding a max_pcktsz map in obj file failed\n");
+ return 1;
+ }
+ bpf_map_update_elem(map_fd, &key, &max_pckt_size, BPF_ANY);
+ }
+
+ /* fetch icmpcnt map */
+ map_fd = bpf_object__find_map_fd_by_name(obj, "icmpcnt");
+ if (map_fd < 0) {
+		printf("finding the icmpcnt map in obj file failed\n");
+ return 1;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
+ printf("link set xdp fd failed\n");
+ return 1;
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return 1;
+ }
+ prog_id = info.id;
+
+ poll_stats(map_fd, kill_after_s);
+ int_exit(0);
+
+ return 0;
+}
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c
new file mode 100644
index 000000000..54c099cbd
--- /dev/null
+++ b/samples/bpf/xdp_fwd_kern.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-18 David Ahern <dsahern@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include <bpf/bpf_helpers.h>
+
+#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 64);
+} xdp_tx_ports SEC(".maps");
+
+/* from include/net/ip.h */
+static __always_inline int ip_decrease_ttl(struct iphdr *iph)
+{
+ u32 check = (__force u32)iph->check;
+
+ check += (__force u32)htons(0x0100);
+ iph->check = (__force __sum16)(check + (check >= 0xFFFF));
+ return --iph->ttl;
+}
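+/* This is the RFC 1624 incremental-update trick: ttl sits in the high
+ * byte of a 16-bit word, so decrementing it by one corresponds to
+ * adding htons(0x0100) to the one's-complement checksum, with the
+ * '(check >= 0xFFFF)' term folding the carry back in.
+ */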
+
+static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct bpf_fib_lookup fib_params;
+ struct ethhdr *eth = data;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph;
+ u16 h_proto;
+ u64 nh_off;
+ int rc;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ __builtin_memset(&fib_params, 0, sizeof(fib_params));
+
+ h_proto = eth->h_proto;
+ if (h_proto == htons(ETH_P_IP)) {
+ iph = data + nh_off;
+
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+
+ if (iph->ttl <= 1)
+ return XDP_PASS;
+
+ fib_params.family = AF_INET;
+ fib_params.tos = iph->tos;
+ fib_params.l4_protocol = iph->protocol;
+ fib_params.sport = 0;
+ fib_params.dport = 0;
+ fib_params.tot_len = ntohs(iph->tot_len);
+ fib_params.ipv4_src = iph->saddr;
+ fib_params.ipv4_dst = iph->daddr;
+ } else if (h_proto == htons(ETH_P_IPV6)) {
+ struct in6_addr *src = (struct in6_addr *) fib_params.ipv6_src;
+ struct in6_addr *dst = (struct in6_addr *) fib_params.ipv6_dst;
+
+ ip6h = data + nh_off;
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+
+ if (ip6h->hop_limit <= 1)
+ return XDP_PASS;
+
+ fib_params.family = AF_INET6;
+ fib_params.flowinfo = *(__be32 *)ip6h & IPV6_FLOWINFO_MASK;
+ fib_params.l4_protocol = ip6h->nexthdr;
+ fib_params.sport = 0;
+ fib_params.dport = 0;
+ fib_params.tot_len = ntohs(ip6h->payload_len);
+ *src = ip6h->saddr;
+ *dst = ip6h->daddr;
+ } else {
+ return XDP_PASS;
+ }
+
+ fib_params.ifindex = ctx->ingress_ifindex;
+
+ rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+	/*
+	 * Some return codes (rc) from bpf_fib_lookup() are important for
+	 * understanding how this XDP prog interacts with the network stack.
+	 *
+	 * BPF_FIB_LKUP_RET_NO_NEIGH:
+	 * Even if the route lookup succeeded, the MAC addresses are also
+	 * needed. They come from the arp/neighbour table, and if that table
+	 * is (still) empty, BPF_FIB_LKUP_RET_NO_NEIGH is returned. To avoid
+	 * doing the ARP lookup directly from XDP, the packet is sent to the
+	 * normal network stack via XDP_PASS, which is expected to do the ARP
+	 * resolution.
+	 *
+	 * BPF_FIB_LKUP_RET_FWD_DISABLED:
+	 * bpf_fib_lookup() respects the sysctl net.ipv{4,6}.conf.all.forwarding
+	 * setting, and returns BPF_FIB_LKUP_RET_FWD_DISABLED if forwarding is
+	 * not enabled on the ingress device.
+	 */
+ if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
+ /* Verify egress index has been configured as TX-port.
+ * (Note: User can still have inserted an egress ifindex that
+ * doesn't support XDP xmit, which will result in packet drops).
+ *
+ * Note: lookup in devmap supported since 0cdbb4b09a0.
+ * If not supported will fail with:
+ * cannot pass map_type 14 into func bpf_map_lookup_elem#1:
+ */
+ if (!bpf_map_lookup_elem(&xdp_tx_ports, &fib_params.ifindex))
+ return XDP_PASS;
+
+ if (h_proto == htons(ETH_P_IP))
+ ip_decrease_ttl(iph);
+ else if (h_proto == htons(ETH_P_IPV6))
+ ip6h->hop_limit--;
+
+ memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
+ memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
+ return bpf_redirect_map(&xdp_tx_ports, fib_params.ifindex, 0);
+ }
+
+ return XDP_PASS;
+}
+
+SEC("xdp_fwd")
+int xdp_fwd_prog(struct xdp_md *ctx)
+{
+ return xdp_fwd_flags(ctx, 0);
+}
+
+SEC("xdp_fwd_direct")
+int xdp_fwd_direct_prog(struct xdp_md *ctx)
+{
+ return xdp_fwd_flags(ctx, BPF_FIB_LOOKUP_DIRECT);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_fwd_user.c b/samples/bpf/xdp_fwd_user.c
new file mode 100644
index 000000000..84f57f120
--- /dev/null
+++ b/samples/bpf/xdp_fwd_user.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-18 David Ahern <dsahern@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <linux/limits.h>
+#include <net/if.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <libgen.h>
+
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+
+static int do_attach(int idx, int prog_fd, int map_fd, const char *name)
+{
+ int err;
+
+ err = bpf_xdp_attach(idx, prog_fd, xdp_flags, NULL);
+ if (err < 0) {
+ printf("ERROR: failed to attach program to %s\n", name);
+ return err;
+ }
+
+ /* Adding ifindex as a possible egress TX port */
+ err = bpf_map_update_elem(map_fd, &idx, &idx, 0);
+ if (err)
+ printf("ERROR: failed using device %s as TX-port\n", name);
+
+ return err;
+}
+
+static int do_detach(int ifindex, const char *ifname, const char *app_name)
+{
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
+ struct bpf_prog_info prog_info = {};
+ char prog_name[BPF_OBJ_NAME_LEN];
+ __u32 info_len, curr_prog_id;
+ int prog_fd;
+ int err = 1;
+
+ if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
+ printf("ERROR: bpf_xdp_query_id failed (%s)\n",
+ strerror(errno));
+ return err;
+ }
+
+ if (!curr_prog_id) {
+ printf("ERROR: flags(0x%x) xdp prog is not attached to %s\n",
+ xdp_flags, ifname);
+ return err;
+ }
+
+ info_len = sizeof(prog_info);
+ prog_fd = bpf_prog_get_fd_by_id(curr_prog_id);
+ if (prog_fd < 0) {
+ printf("ERROR: bpf_prog_get_fd_by_id failed (%s)\n",
+ strerror(errno));
+ return prog_fd;
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (err) {
+ printf("ERROR: bpf_obj_get_info_by_fd failed (%s)\n",
+ strerror(errno));
+ goto close_out;
+ }
+ snprintf(prog_name, sizeof(prog_name), "%s_prog", app_name);
+ prog_name[BPF_OBJ_NAME_LEN - 1] = '\0';
+
+ if (strcmp(prog_info.name, prog_name)) {
+ printf("ERROR: %s isn't attached to %s\n", app_name, ifname);
+ err = 1;
+ goto close_out;
+ }
+
+ opts.old_prog_fd = prog_fd;
+ err = bpf_xdp_detach(ifindex, xdp_flags, &opts);
+ if (err < 0)
+ printf("ERROR: failed to detach program from %s (%s)\n",
+ ifname, strerror(errno));
+	/* TODO: Remember to clean up the map when adding use of a shared map:
+	 * bpf_map_delete_elem(map_fd, &idx);
+	 */
+close_out:
+ close(prog_fd);
+ return err;
+}
+
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "usage: %s [OPTS] interface-list\n"
+ "\nOPTS:\n"
+ " -d detach program\n"
+ " -S use skb-mode\n"
+ " -F force loading prog\n"
+ " -D direct table lookups (skip fib rules)\n",
+ prog);
+}
+
+int main(int argc, char **argv)
+{
+ const char *prog_name = "xdp_fwd";
+ struct bpf_program *prog = NULL;
+ struct bpf_program *pos;
+ const char *sec_name;
+ int prog_fd = -1, map_fd = -1;
+ char filename[PATH_MAX];
+ struct bpf_object *obj;
+ int opt, i, idx, err;
+ int attach = 1;
+ int ret = 0;
+
+ while ((opt = getopt(argc, argv, ":dDSF")) != -1) {
+ switch (opt) {
+ case 'd':
+ attach = 0;
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ case 'D':
+ prog_name = "xdp_fwd_direct";
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ if (optind == argc) {
+ usage(basename(argv[0]));
+ return 1;
+ }
+
+ if (attach) {
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+		if (access(filename, R_OK) < 0) {
+ printf("error accessing file %s: %s\n",
+ filename, strerror(errno));
+ return 1;
+ }
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err) {
+ printf("Does kernel support devmap lookup?\n");
+ /* If not, the error message will be:
+ * "cannot pass map_type 14 into func bpf_map_lookup_elem#1"
+ */
+ return 1;
+ }
+
+ bpf_object__for_each_program(pos, obj) {
+ sec_name = bpf_program__section_name(pos);
+ if (sec_name && !strcmp(sec_name, prog_name)) {
+ prog = pos;
+ break;
+ }
+ }
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+			printf("program not found: %s\n", strerror(-prog_fd));
+ return 1;
+ }
+ map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj,
+ "xdp_tx_ports"));
+ if (map_fd < 0) {
+			printf("map not found: %s\n", strerror(-map_fd));
+ return 1;
+ }
+ }
+
+ for (i = optind; i < argc; ++i) {
+ idx = if_nametoindex(argv[i]);
+ if (!idx)
+ idx = strtoul(argv[i], NULL, 0);
+
+ if (!idx) {
+ fprintf(stderr, "Invalid arg\n");
+ return 1;
+ }
+ if (!attach) {
+ err = do_detach(idx, argv[i], prog_name);
+ if (err)
+ ret = err;
+ } else {
+ err = do_attach(idx, prog_fd, map_fd, argv[i]);
+ if (err)
+ ret = err;
+ }
+ }
+
+ return ret;
+}
diff --git a/samples/bpf/xdp_monitor.bpf.c b/samples/bpf/xdp_monitor.bpf.c
new file mode 100644
index 000000000..cfb41e220
--- /dev/null
+++ b/samples/bpf/xdp_monitor.bpf.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * XDP monitor tool, based on tracepoints
+ */
+#include "xdp_sample.bpf.h"
+
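+/* This file intentionally defines no programs of its own: the shared
+ * xdp_sample BPF code provides the tracepoint handlers (see
+ * xdp_sample.bpf.h); only the license string is supplied here.
+ */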
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
new file mode 100644
index 000000000..58015eb2f
--- /dev/null
+++ b/samples/bpf/xdp_monitor_user.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */
+static const char *__doc__ =
+"XDP monitor tool, based on tracepoints\n";
+
+static const char *__doc_err_only__ =
+" NOTICE: Only tracking XDP redirect errors\n"
+" Enable redirect success stats via '-s/--stats'\n"
+" (which comes with a per packet processing overhead)\n";
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <locale.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+#include <signal.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+#include "xdp_monitor.skel.h"
+
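+/* Default stat mask: redirect *success* counting (SAMPLE_REDIRECT_CNT) is
+ * left out because it adds per-packet overhead; -s/--stats enables it below.
+ */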
+static int mask = SAMPLE_REDIRECT_ERR_CNT | SAMPLE_CPUMAP_ENQUEUE_CNT |
+ SAMPLE_CPUMAP_KTHREAD_CNT | SAMPLE_EXCEPTION_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
+
+DEFINE_SAMPLE_INIT(xdp_monitor);
+
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "stats", no_argument, NULL, 's' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ {}
+};
+
+int main(int argc, char **argv)
+{
+ unsigned long interval = 2;
+ int ret = EXIT_FAIL_OPTION;
+ struct xdp_monitor *skel;
+ bool errors_only = true;
+ int longindex = 0, opt;
+ bool error = true;
+
+ /* Parse commands line args */
+ while ((opt = getopt_long(argc, argv, "si:vh",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 's':
+ errors_only = false;
+ mask |= SAMPLE_REDIRECT_CNT;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ break;
+ case 'v':
+ sample_switch_mode();
+ break;
+ case 'h':
+ error = false;
+ default:
+ sample_usage(argv, long_options, __doc__, mask, error);
+ return ret;
+ }
+ }
+
+ skel = xdp_monitor__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_monitor__open: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = xdp_monitor__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_monitor__load: %s\n", strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ if (errors_only)
+ printf("%s", __doc_err_only__);
+
+ ret = sample_run(interval, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ ret = EXIT_OK;
+end_destroy:
+ xdp_monitor__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_redirect.bpf.c b/samples/bpf/xdp_redirect.bpf.c
new file mode 100644
index 000000000..7c02bacfe
--- /dev/null
+++ b/samples/bpf/xdp_redirect.bpf.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+
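+/* Target ifindex for bpf_redirect(); set from user space via
+ * skel->rodata before the object is loaded.
+ */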
+const volatile int ifindex_out;
+
+SEC("xdp")
+int xdp_redirect_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ struct datarec *rec;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ swap_src_dst_mac(data);
+ return bpf_redirect(ifindex_out, 0);
+}
+
+/* Redirect requires an XDP bpf_prog loaded on the TX device */
+SEC("xdp")
+int xdp_redirect_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_cpu.bpf.c b/samples/bpf/xdp_redirect_cpu.bpf.c
new file mode 100644
index 000000000..87c54bfdb
--- /dev/null
+++ b/samples/bpf/xdp_redirect_cpu.bpf.c
@@ -0,0 +1,539 @@
+/* XDP redirect to CPUs via cpumap (BPF_MAP_TYPE_CPUMAP)
+ *
+ * GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
+ */
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+#include "hash_func01.h"
+
+/* Special map type that can XDP_REDIRECT frames to another CPU */
+struct {
+ __uint(type, BPF_MAP_TYPE_CPUMAP);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_cpumap_val));
+} cpu_map SEC(".maps");
+
+/* Set of maps controlling which CPUs are available, and for iterating
+ * through the selectable redirect CPUs.
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+} cpus_available SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} cpus_count SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} cpus_iterator SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} tx_port SEC(".maps");
+
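+/* Egress source MAC, written by user space (get_mac_addr()) before the
+ * devmap egress program below is attached.
+ */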
+char tx_mac_addr[ETH_ALEN];
+
+/* Helper parse functions */
+
+static __always_inline
+bool parse_eth(struct ethhdr *eth, void *data_end,
+ u16 *eth_proto, u64 *l3_offset)
+{
+ u16 eth_type;
+ u64 offset;
+
+ offset = sizeof(*eth);
+ if ((void *)eth + offset > data_end)
+ return false;
+
+ eth_type = eth->h_proto;
+
+ /* Skip non-802.3 Ethertypes */
+ if (__builtin_expect(bpf_ntohs(eth_type) < ETH_P_802_3_MIN, 0))
+ return false;
+
+ /* Handle VLAN tagged packet */
+ if (eth_type == bpf_htons(ETH_P_8021Q) ||
+ eth_type == bpf_htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vlan_hdr;
+
+ vlan_hdr = (void *)eth + offset;
+ offset += sizeof(*vlan_hdr);
+ if ((void *)eth + offset > data_end)
+ return false;
+ eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+ }
+ /* Handle double VLAN tagged packet */
+ if (eth_type == bpf_htons(ETH_P_8021Q) ||
+ eth_type == bpf_htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vlan_hdr;
+
+ vlan_hdr = (void *)eth + offset;
+ offset += sizeof(*vlan_hdr);
+ if ((void *)eth + offset > data_end)
+ return false;
+ eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+ }
+
+ *eth_proto = bpf_ntohs(eth_type);
+ *l3_offset = offset;
+ return true;
+}
+
+static __always_inline
+u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct iphdr *iph = data + nh_off;
+ struct udphdr *udph;
+
+ if (iph + 1 > data_end)
+ return 0;
+ if (iph->protocol != IPPROTO_UDP)
+ return 0;
+
+ udph = (void *)(iph + 1);
+ if (udph + 1 > data_end)
+ return 0;
+
+ return bpf_ntohs(udph->dest);
+}
+
+static __always_inline
+int get_proto_ipv4(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct iphdr *iph = data + nh_off;
+
+ if (iph + 1 > data_end)
+ return 0;
+ return iph->protocol;
+}
+
+static __always_inline
+int get_proto_ipv6(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ipv6hdr *ip6h = data + nh_off;
+
+ if (ip6h + 1 > data_end)
+ return 0;
+ return ip6h->nexthdr;
+}
+
+SEC("xdp")
+int xdp_prognum0_no_touch(struct xdp_md *ctx)
+{
+ u32 key = bpf_get_smp_processor_id();
+ struct datarec *rec;
+ u32 *cpu_selected;
+ u32 cpu_dest = 0;
+ u32 key0 = 0;
+
+ /* Only use first entry in cpus_available */
+ cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0);
+ if (!cpu_selected)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_selected;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
+SEC("xdp")
+int xdp_prognum1_touch_data(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ struct datarec *rec;
+ u32 *cpu_selected;
+ u32 cpu_dest = 0;
+ u32 key0 = 0;
+ u16 eth_type;
+
+ /* Only use first entry in cpus_available */
+ cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0);
+ if (!cpu_selected)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_selected;
+
+ /* Validate that the packet length covers at least the Eth header */
+ if (eth + 1 > data_end)
+ return XDP_ABORTED;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ /* Read packet data, and use it (drop non-802.3 Ethertypes) */
+ eth_type = eth->h_proto;
+ if (bpf_ntohs(eth_type) < ETH_P_802_3_MIN) {
+ NO_TEAR_INC(rec->dropped);
+ return XDP_DROP;
+ }
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
+SEC("xdp")
+int xdp_prognum2_round_robin(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct datarec *rec;
+ u32 cpu_dest = 0;
+ u32 key0 = 0;
+
+ u32 *cpu_selected;
+ u32 *cpu_iterator;
+ u32 *cpu_max;
+ u32 cpu_idx;
+
+ cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
+ if (!cpu_max)
+ return XDP_ABORTED;
+
+ cpu_iterator = bpf_map_lookup_elem(&cpus_iterator, &key0);
+ if (!cpu_iterator)
+ return XDP_ABORTED;
+ cpu_idx = *cpu_iterator;
+
+ *cpu_iterator += 1;
+ if (*cpu_iterator == *cpu_max)
+ *cpu_iterator = 0;
+
+ cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
+ if (!cpu_selected)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_selected;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
+SEC("xdp")
+int xdp_prognum3_proto_separate(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ u8 ip_proto = IPPROTO_UDP;
+ struct datarec *rec;
+ u16 eth_proto = 0;
+ u64 l3_offset = 0;
+ u32 cpu_dest = 0;
+ u32 *cpu_lookup;
+ u32 cpu_idx = 0;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ if (!parse_eth(eth, data_end, &eth_proto, &l3_offset))
+ return XDP_PASS; /* Just skip */
+
+ /* Extract L4 protocol */
+ switch (eth_proto) {
+ case ETH_P_IP:
+ ip_proto = get_proto_ipv4(ctx, l3_offset);
+ break;
+ case ETH_P_IPV6:
+ ip_proto = get_proto_ipv6(ctx, l3_offset);
+ break;
+ case ETH_P_ARP:
+ cpu_idx = 0; /* ARP packet handled on separate CPU */
+ break;
+ default:
+ cpu_idx = 0;
+ }
+
+ /* Choose CPU based on L4 protocol */
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ cpu_idx = 2;
+ break;
+ case IPPROTO_TCP:
+ cpu_idx = 0;
+ break;
+ case IPPROTO_UDP:
+ cpu_idx = 1;
+ break;
+ default:
+ cpu_idx = 0;
+ }
+
+ cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
+ if (!cpu_lookup)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_lookup;
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
+SEC("xdp")
+int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ u8 ip_proto = IPPROTO_UDP;
+ struct datarec *rec;
+ u16 eth_proto = 0;
+ u64 l3_offset = 0;
+ u32 cpu_dest = 0;
+ u32 *cpu_lookup;
+ u32 cpu_idx = 0;
+ u16 dest_port;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ if (!parse_eth(eth, data_end, &eth_proto, &l3_offset))
+ return XDP_PASS; /* Just skip */
+
+ /* Extract L4 protocol */
+ switch (eth_proto) {
+ case ETH_P_IP:
+ ip_proto = get_proto_ipv4(ctx, l3_offset);
+ break;
+ case ETH_P_IPV6:
+ ip_proto = get_proto_ipv6(ctx, l3_offset);
+ break;
+ case ETH_P_ARP:
+ cpu_idx = 0; /* ARP packet handled on separate CPU */
+ break;
+ default:
+ cpu_idx = 0;
+ }
+
+ /* Choose CPU based on L4 protocol */
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ cpu_idx = 2;
+ break;
+ case IPPROTO_TCP:
+ cpu_idx = 0;
+ break;
+ case IPPROTO_UDP:
+ cpu_idx = 1;
+ /* DDoS filter UDP port 9 (pktgen) */
+ dest_port = get_dest_port_ipv4_udp(ctx, l3_offset);
+ if (dest_port == 9) {
+ NO_TEAR_INC(rec->dropped);
+ return XDP_DROP;
+ }
+ break;
+ default:
+ cpu_idx = 0;
+ }
+
+ cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
+ if (!cpu_lookup)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_lookup;
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
+/* Hashing initval */
+#define INITVAL 15485863
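+/* (15485863 is the 1,000,000th prime; any fixed large prime works here.) */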
+
+static __always_inline
+u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct iphdr *iph = data + nh_off;
+ u32 cpu_hash;
+
+ if (iph + 1 > data_end)
+ return 0;
+
+ cpu_hash = iph->saddr + iph->daddr;
+ cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);
+
+ return cpu_hash;
+}
+
+static __always_inline
+u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ipv6hdr *ip6h = data + nh_off;
+ u32 cpu_hash;
+
+ if (ip6h + 1 > data_end)
+ return 0;
+
+ cpu_hash = ip6h->saddr.in6_u.u6_addr32[0] + ip6h->daddr.in6_u.u6_addr32[0];
+ cpu_hash += ip6h->saddr.in6_u.u6_addr32[1] + ip6h->daddr.in6_u.u6_addr32[1];
+ cpu_hash += ip6h->saddr.in6_u.u6_addr32[2] + ip6h->daddr.in6_u.u6_addr32[2];
+ cpu_hash += ip6h->saddr.in6_u.u6_addr32[3] + ip6h->daddr.in6_u.u6_addr32[3];
+ cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr);
+
+ return cpu_hash;
+}
+
+/* Load-balance traffic based on hashing the IP-addrs + L4-proto. The
+ * hashing scheme is symmetric: the hash input is the sum of src and
+ * dst addresses, which commutes, so swapping IP src/dest still hits
+ * the same CPU.
+ */
+SEC("xdp")
+int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ struct datarec *rec;
+ u16 eth_proto = 0;
+ u64 l3_offset = 0;
+ u32 cpu_dest = 0;
+ u32 cpu_idx = 0;
+ u32 *cpu_lookup;
+ u32 key0 = 0;
+ u32 *cpu_max;
+ u32 cpu_hash;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
+ if (!cpu_max)
+ return XDP_ABORTED;
+
+ if (!parse_eth(eth, data_end, &eth_proto, &l3_offset))
+ return XDP_PASS; /* Just skip */
+
+ /* Hash for IPv4 and IPv6 */
+ switch (eth_proto) {
+ case ETH_P_IP:
+ cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset);
+ break;
+ case ETH_P_IPV6:
+ cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset);
+ break;
+ case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */
+ default:
+ cpu_hash = 0;
+ }
+
+ /* Choose CPU based on hash */
+ cpu_idx = cpu_hash % *cpu_max;
+
+ cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
+ if (!cpu_lookup)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_lookup;
+
+ if (cpu_dest >= nr_cpus) {
+ NO_TEAR_INC(rec->issue);
+ return XDP_ABORTED;
+ }
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+}
+
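+/* The following "xdp/cpumap" programs run on the destination CPU, in the
+ * cpumap kthread context, after frames have been enqueued via
+ * bpf_redirect_map(&cpu_map, ...) above.
+ */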
+SEC("xdp/cpumap")
+int xdp_redirect_cpu_devmap(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ swap_src_dst_mac(data);
+ return bpf_redirect_map(&tx_port, 0, 0);
+}
+
+SEC("xdp/cpumap")
+int xdp_redirect_cpu_pass(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+SEC("xdp/cpumap")
+int xdp_redirect_cpu_drop(struct xdp_md *ctx)
+{
+ return XDP_DROP;
+}
+
+SEC("xdp/devmap")
+int xdp_redirect_egress_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ __builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
new file mode 100644
index 000000000..a12381c37
--- /dev/null
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
+ */
+static const char *__doc__ =
+"XDP CPU redirect tool, using BPF_MAP_TYPE_CPUMAP\n"
+"Usage: xdp_redirect_cpu -d <IFINDEX|IFNAME> -c 0 ... -c N\n"
+"Valid specification for CPUMAP BPF program:\n"
+" --mprog-name/-e pass (use built-in XDP_PASS program)\n"
+" --mprog-name/-e drop (use built-in XDP_DROP program)\n"
+" --redirect-device/-r <ifindex|ifname> (use built-in DEVMAP redirect program)\n"
+" Custom CPUMAP BPF program:\n"
+" --mprog-filename/-f <filename> --mprog-name/-e <program>\n"
+" Optionally, also pass --redirect-map/-m and --redirect-device/-r together\n"
+" to configure DEVMAP in BPF object <filename>\n";
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <sys/sysinfo.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+#include <linux/limits.h>
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+#include "xdp_redirect_cpu.skel.h"
+
+static int map_fd;
+static int avail_fd;
+static int count_fd;
+
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
+ SAMPLE_CPUMAP_ENQUEUE_CNT | SAMPLE_CPUMAP_KTHREAD_CNT |
+ SAMPLE_EXCEPTION_CNT;
+
+DEFINE_SAMPLE_INIT(xdp_redirect_cpu);
+
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "dev", required_argument, NULL, 'd' },
+ { "skb-mode", no_argument, NULL, 'S' },
+ { "progname", required_argument, NULL, 'p' },
+ { "qsize", required_argument, NULL, 'q' },
+ { "cpu", required_argument, NULL, 'c' },
+ { "stress-mode", no_argument, NULL, 'x' },
+ { "force", no_argument, NULL, 'F' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ { "stats", no_argument, NULL, 's' },
+ { "mprog-name", required_argument, NULL, 'e' },
+ { "mprog-filename", required_argument, NULL, 'f' },
+ { "redirect-device", required_argument, NULL, 'r' },
+ { "redirect-map", required_argument, NULL, 'm' },
+ {}
+};
+
+static void print_avail_progs(struct bpf_object *obj)
+{
+ struct bpf_program *pos;
+
+ printf(" Programs to be used for -p/--progname:\n");
+ bpf_object__for_each_program(pos, obj) {
+ if (bpf_program__type(pos) == BPF_PROG_TYPE_XDP) {
+ if (!strncmp(bpf_program__name(pos), "xdp_prognum",
+ sizeof("xdp_prognum") - 1))
+ printf(" %s\n", bpf_program__name(pos));
+ }
+ }
+}
+
+static void usage(char *argv[], const struct option *long_options,
+ const char *doc, int mask, bool error, struct bpf_object *obj)
+{
+ sample_usage(argv, long_options, doc, mask, error);
+ print_avail_progs(obj);
+}
+
+static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
+ __u32 avail_idx, bool new)
+{
+ __u32 curr_cpus_count = 0;
+ __u32 key = 0;
+ int ret;
+
+ /* Add a CPU entry to the cpumap; this allocates a cpu entry in
+ * the kernel for that CPU.
+ */
+ ret = bpf_map_update_elem(map_fd, &cpu, value, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Create CPU entry failed: %s\n", strerror(errno));
+ return ret;
+ }
+
+ /* Inform the bpf_progs, via the control maps, that a new CPU
+ * is available to select from.
+ */
+ ret = bpf_map_update_elem(avail_fd, &avail_idx, &cpu, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Add to avail CPUs failed: %s\n", strerror(errno));
+ return ret;
+ }
+
+ /* When not replacing/updating existing entry, bump the count */
+ ret = bpf_map_lookup_elem(count_fd, &key, &curr_cpus_count);
+ if (ret < 0) {
+ fprintf(stderr, "Failed reading curr cpus_count: %s\n",
+ strerror(errno));
+ return ret;
+ }
+ if (new) {
+ curr_cpus_count++;
+ ret = bpf_map_update_elem(count_fd, &key,
+ &curr_cpus_count, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Failed write curr cpus_count: %s\n",
+ strerror(errno));
+ return ret;
+ }
+ }
+
+ printf("%s CPU: %u as idx: %u qsize: %d cpumap_prog_fd: %d (cpus_count: %u)\n",
+ new ? "Add new" : "Replace", cpu, avail_idx,
+ value->qsize, value->bpf_prog.fd, curr_cpus_count);
+
+ return 0;
+}
+
+/* CPUs are zero-indexed. Thus, add a special sentinel default value
+ * in the cpus_available map to mark CPU indexes that are not configured.
+ */
+static int mark_cpus_unavailable(void)
+{
+ int ret, i, n_cpus = libbpf_num_possible_cpus();
+ __u32 invalid_cpu = n_cpus;
+
+ for (i = 0; i < n_cpus; i++) {
+ ret = bpf_map_update_elem(avail_fd, &i,
+ &invalid_cpu, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Failed marking CPU unavailable: %s\n",
+ strerror(errno));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Stress cpumap management code by concurrently changing underlying cpumap */
+static void stress_cpumap(void *ctx)
+{
+ struct bpf_cpumap_val *value = ctx;
+
+ /* Changing qsize causes the kernel to free and allocate a new
+ * bpf_cpu_map_entry, exercising the associated (complicated)
+ * tear-down procedure.
+ */
+ value->qsize = 1024;
+ create_cpu_entry(1, value, 0, false);
+ value->qsize = 8;
+ create_cpu_entry(1, value, 0, false);
+ value->qsize = 16000;
+ create_cpu_entry(1, value, 0, false);
+}
+
+static int set_cpumap_prog(struct xdp_redirect_cpu *skel,
+ const char *redir_interface, const char *redir_map,
+ const char *mprog_filename, const char *mprog_name)
+{
+ if (mprog_filename) {
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int ret;
+
+ if (!mprog_name) {
+ fprintf(stderr, "BPF program not specified for file %s\n",
+ mprog_filename);
+ goto end;
+ }
+ if ((redir_interface && !redir_map) || (!redir_interface && redir_map)) {
+ fprintf(stderr, "--redirect-%s specified but --redirect-%s not specified\n",
+ redir_interface ? "device" : "map", redir_interface ? "map" : "device");
+ goto end;
+ }
+
+ /* Custom BPF program */
+ obj = bpf_object__open_file(mprog_filename, NULL);
+ if (!obj) {
+ ret = -errno;
+ fprintf(stderr, "Failed to bpf_prog_load_xattr: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ ret = bpf_object__load(obj);
+ if (ret < 0) {
+ ret = -errno;
+ fprintf(stderr, "Failed to bpf_object__load: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ if (redir_map) {
+ int err, redir_map_fd, ifindex_out, key = 0;
+
+ redir_map_fd = bpf_object__find_map_fd_by_name(obj, redir_map);
+ if (redir_map_fd < 0) {
+ fprintf(stderr, "Failed to bpf_object__find_map_fd_by_name: %s\n",
+ strerror(errno));
+ return redir_map_fd;
+ }
+
+ ifindex_out = if_nametoindex(redir_interface);
+ if (!ifindex_out)
+ ifindex_out = strtoul(redir_interface, NULL, 0);
+ if (!ifindex_out) {
+ fprintf(stderr, "Bad interface name or index\n");
+ return -EINVAL;
+ }
+
+ err = bpf_map_update_elem(redir_map_fd, &key, &ifindex_out, 0);
+ if (err < 0)
+ return err;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, mprog_name);
+ if (!prog) {
+ ret = -errno;
+ fprintf(stderr, "Failed to bpf_object__find_program_by_name: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ return bpf_program__fd(prog);
+ } else {
+ if (mprog_name) {
+ if (redir_interface || redir_map) {
+ fprintf(stderr, "Need to specify --mprog-filename/-f\n");
+ goto end;
+ }
+ if (!strcmp(mprog_name, "pass") || !strcmp(mprog_name, "drop")) {
+ /* Use built-in pass/drop programs */
+ return *mprog_name == 'p' ? bpf_program__fd(skel->progs.xdp_redirect_cpu_pass)
+ : bpf_program__fd(skel->progs.xdp_redirect_cpu_drop);
+ } else {
+ fprintf(stderr, "Unknown name \"%s\" for built-in BPF program\n",
+ mprog_name);
+ goto end;
+ }
+ } else {
+ if (redir_map) {
+ fprintf(stderr, "Need to specify --mprog-filename, --mprog-name and"
+ " --redirect-device with --redirect-map\n");
+ goto end;
+ }
+ if (redir_interface) {
+ /* Use built-in devmap redirect */
+ struct bpf_devmap_val val = {};
+ int ifindex_out, err;
+ __u32 key = 0;
+
+ ifindex_out = if_nametoindex(redir_interface);
+ if (!ifindex_out)
+ ifindex_out = strtoul(redir_interface, NULL, 0);
+ if (!ifindex_out) {
+ fprintf(stderr, "Bad interface name or index\n");
+ return -EINVAL;
+ }
+
+ if (get_mac_addr(ifindex_out, skel->bss->tx_mac_addr) < 0) {
+ printf("Get interface %d mac failed\n", ifindex_out);
+ return -EINVAL;
+ }
+
+ val.ifindex = ifindex_out;
+ val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_egress_prog);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.tx_port), &key, &val, 0);
+ if (err < 0)
+ return -errno;
+
+ return bpf_program__fd(skel->progs.xdp_redirect_cpu_devmap);
+ }
+ }
+ }
+
+ /* Disabled */
+ return 0;
+end:
+ fprintf(stderr, "Invalid options for CPUMAP BPF program\n");
+ return -EINVAL;
+}
+
+int main(int argc, char **argv)
+{
+ const char *redir_interface = NULL, *redir_map = NULL;
+ const char *mprog_filename = NULL, *mprog_name = NULL;
+ struct xdp_redirect_cpu *skel;
+ struct bpf_map_info info = {};
+ struct bpf_cpumap_val value;
+ __u32 infosz = sizeof(info);
+ int ret = EXIT_FAIL_OPTION;
+ unsigned long interval = 2;
+ bool stress_mode = false;
+ struct bpf_program *prog;
+ const char *prog_name;
+ bool generic = false;
+ bool force = false;
+ int added_cpus = 0;
+ bool error = true;
+ int longindex = 0;
+ int add_cpu = -1;
+ int ifindex = -1;
+ int *cpu, i, opt;
+ __u32 qsize;
+ int n_cpus;
+
+ n_cpus = libbpf_num_possible_cpus();
+
+ /* Notice: Choosing the queue size is very important when the CPU is
+ * configured with power-saving states.
+ *
+ * If the deepest sleep state takes 133 usec to wake up from, and the
+ * link speed is 10Gbit/s ((10*10^9/8) bytes/sec), then
+ * (10*10^9/8)*(133/10^6) = 166250 bytes can arrive within 133 usec.
+ * With MTU-sized packets this is 110 packets, and with minimum-sized
+ * Ethernet frames (84 bytes incl. MAC-preamble + intergap) it is
+ * 1979 packets.
+ *
+ * The default cpumap queue is set to 2048: the small-packet worst
+ * case of 1979 packets, plus up to 64 packets per kthread wakeup
+ * call (due to xdp_do_flush), gives a worst case of 2043 packets.
+ *
+ * A sysadmin can configure the system to avoid deep-sleep via:
+ * tuned-adm profile network-latency
+ */
+ qsize = 2048;
+
+ skel = xdp_redirect_cpu__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_redirect_cpu__open: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ if (bpf_map__set_max_entries(skel->maps.cpu_map, n_cpus) < 0) {
+ fprintf(stderr, "Failed to set max entries for cpu_map map: %s",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ if (bpf_map__set_max_entries(skel->maps.cpus_available, n_cpus) < 0) {
+ fprintf(stderr, "Failed to set max entries for cpus_available map: %s",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ cpu = calloc(n_cpus, sizeof(int));
+ if (!cpu) {
+ fprintf(stderr, "Failed to allocate cpu array\n");
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ prog = skel->progs.xdp_prognum5_lb_hash_ip_pairs;
+ while ((opt = getopt_long(argc, argv, "d:si:Sxp:f:e:r:m:c:q:Fvh",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'd':
+ if (strlen(optarg) >= IF_NAMESIZE) {
+ fprintf(stderr, "-d/--dev name too long\n");
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_cpu;
+ }
+ ifindex = if_nametoindex(optarg);
+ if (!ifindex)
+ ifindex = strtoul(optarg, NULL, 0);
+ if (!ifindex) {
+ fprintf(stderr, "Bad interface index or name (%d): %s\n",
+ errno, strerror(errno));
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_cpu;
+ }
+ break;
+ case 's':
+ mask |= SAMPLE_REDIRECT_MAP_CNT;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ break;
+ case 'S':
+ generic = true;
+ break;
+ case 'x':
+ stress_mode = true;
+ break;
+ case 'p':
+ /* Selecting eBPF prog to load */
+ prog_name = optarg;
+ prog = bpf_object__find_program_by_name(skel->obj,
+ prog_name);
+ if (!prog) {
+ fprintf(stderr,
+ "Failed to find program %s specified by"
+ " option -p/--progname\n",
+ prog_name);
+ print_avail_progs(skel->obj);
+ goto end_cpu;
+ }
+ break;
+ case 'f':
+ mprog_filename = optarg;
+ break;
+ case 'e':
+ mprog_name = optarg;
+ break;
+ case 'r':
+ redir_interface = optarg;
+ mask |= SAMPLE_DEVMAP_XMIT_CNT_MULTI;
+ break;
+ case 'm':
+ redir_map = optarg;
+ break;
+ case 'c':
+ /* Add multiple CPUs */
+ add_cpu = strtoul(optarg, NULL, 0);
+ if (add_cpu >= n_cpus) {
+ fprintf(stderr,
+ "--cpu nr %d too large for cpumap (only %d CPUs possible)\n",
+ add_cpu, n_cpus);
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_cpu;
+ }
+ cpu[added_cpus++] = add_cpu;
+ break;
+ case 'q':
+ qsize = strtoul(optarg, NULL, 0);
+ break;
+ case 'F':
+ force = true;
+ break;
+ case 'v':
+ sample_switch_mode();
+ break;
+ case 'h':
+ error = false;
+ default:
+ usage(argv, long_options, __doc__, mask, error, skel->obj);
+ goto end_cpu;
+ }
+ }
+
+ ret = EXIT_FAIL_OPTION;
+ if (ifindex == -1) {
+ fprintf(stderr, "Required option --dev missing\n");
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_cpu;
+ }
+
+ if (add_cpu == -1) {
+ fprintf(stderr, "Required option --cpu missing\n"
+ "Specify multiple --cpu option to add more\n");
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_cpu;
+ }
+
+ skel->rodata->from_match[0] = ifindex;
+ if (redir_interface)
+ skel->rodata->to_match[0] = if_nametoindex(redir_interface);
+
+ ret = xdp_redirect_cpu__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_redirect_cpu__load: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_cpu;
+ }
+
+ ret = bpf_obj_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
+ if (ret < 0) {
+ fprintf(stderr, "Failed bpf_obj_get_info_by_fd for cpumap: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_cpu;
+ }
+
+ skel->bss->cpumap_map_id = info.id;
+
+ map_fd = bpf_map__fd(skel->maps.cpu_map);
+ avail_fd = bpf_map__fd(skel->maps.cpus_available);
+ count_fd = bpf_map__fd(skel->maps.cpus_count);
+
+ ret = mark_cpus_unavailable();
+ if (ret < 0) {
+ fprintf(stderr, "Unable to mark CPUs as unavailable\n");
+ ret = EXIT_FAIL_BPF;
+ goto end_cpu;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_cpu;
+ }
+
+ value.bpf_prog.fd = set_cpumap_prog(skel, redir_interface, redir_map,
+ mprog_filename, mprog_name);
+ if (value.bpf_prog.fd < 0) {
+ fprintf(stderr, "Failed to set CPUMAP BPF program: %s\n",
+ strerror(-value.bpf_prog.fd));
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ ret = EXIT_FAIL_BPF;
+ goto end_cpu;
+ }
+ value.qsize = qsize;
+
+ for (i = 0; i < added_cpus; i++) {
+ if (create_cpu_entry(cpu[i], &value, i, true) < 0) {
+ fprintf(stderr, "Cannot proceed, exiting\n");
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ ret = EXIT_FAIL;
+ goto end_cpu;
+ }
+ }
+
+ ret = EXIT_FAIL_XDP;
+ if (sample_install_xdp(prog, ifindex, generic, force) < 0)
+ goto end_cpu;
+
+ ret = sample_run(interval, stress_mode ? stress_cpumap : NULL, &value);
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_cpu;
+ }
+ ret = EXIT_OK;
+end_cpu:
+ free(cpu);
+end_destroy:
+ xdp_redirect_cpu__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_redirect_map.bpf.c b/samples/bpf/xdp_redirect_map.bpf.c
new file mode 100644
index 000000000..8557c278d
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map.bpf.c
@@ -0,0 +1,97 @@
+/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#define KBUILD_MODNAME "foo"
+
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+
+/* The 2nd xdp prog on egress does not support skb mode, so we define two
+ * maps, tx_port_general and tx_port_native.
+ */
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 1);
+} tx_port_general SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 1);
+} tx_port_native SEC(".maps");
+
+/* Store the egress interface's MAC address */
+const volatile __u8 tx_mac_addr[ETH_ALEN];
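+/* Written from user space via skel->rodata before load (see
+ * get_mac_addr() in xdp_redirect_map_user.c).
+ */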
+
+static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_map)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = bpf_get_smp_processor_id();
+ struct ethhdr *eth = data;
+ struct datarec *rec;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+ swap_src_dst_mac(data);
+ return bpf_redirect_map(redirect_map, 0, 0);
+}
+
+SEC("xdp")
+int xdp_redirect_map_general(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &tx_port_general);
+}
+
+SEC("xdp")
+int xdp_redirect_map_native(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &tx_port_native);
+}
+
+SEC("xdp/devmap")
+int xdp_redirect_map_egress(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u8 *mac_addr = (u8 *) tx_mac_addr;
+ struct ethhdr *eth = data;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ barrier_var(mac_addr); /* prevent optimizing out memcpy */
+ __builtin_memcpy(eth->h_source, mac_addr, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+/* Redirect requires an XDP bpf_prog loaded on the TX device */
+SEC("xdp")
+int xdp_redirect_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_map_multi.bpf.c b/samples/bpf/xdp_redirect_map_multi.bpf.c
new file mode 100644
index 000000000..8b2fd4ec2
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map_multi.bpf.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KBUILD_MODNAME "foo"
+
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 32);
+} forward_map_general SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(struct bpf_devmap_val));
+ __uint(max_entries, 32);
+} forward_map_native SEC(".maps");
+
+/* Map to store the egress interfaces' MAC addresses */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, __be64);
+ __uint(max_entries, 32);
+} mac_map SEC(".maps");
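+/* Populated from user space by update_mac_map() with each egress
+ * interface's MAC address.
+ */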
+
+static int xdp_redirect_map(struct xdp_md *ctx, void *forward_map)
+{
+ u32 key = bpf_get_smp_processor_id();
+ struct datarec *rec;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (!rec)
+ return XDP_PASS;
+ NO_TEAR_INC(rec->processed);
+
+ return bpf_redirect_map(forward_map, 0,
+ BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
+}
+
+SEC("xdp")
+int xdp_redirect_map_general(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &forward_map_general);
+}
+
+SEC("xdp")
+int xdp_redirect_map_native(struct xdp_md *ctx)
+{
+ return xdp_redirect_map(ctx, &forward_map_native);
+}
+
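+/* Second-stage program, run on the egress device after the broadcast
+ * redirect: rewrites the source MAC to that of the egress interface.
+ */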
+SEC("xdp/devmap")
+int xdp_devmap_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ u32 key = ctx->egress_ifindex;
+ struct ethhdr *eth = data;
+ __be64 *mac;
+ u64 nh_off;
+
+ nh_off = sizeof(*eth);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+
+ mac = bpf_map_lookup_elem(&mac_map, &key);
+ if (mac)
+ __builtin_memcpy(eth->h_source, mac, ETH_ALEN);
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_map_multi_user.c b/samples/bpf/xdp_redirect_map_multi_user.c
new file mode 100644
index 000000000..9e24f2705
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map_multi_user.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+static const char *__doc__ =
+"XDP multi redirect tool, using BPF_MAP_TYPE_DEVMAP and BPF_F_BROADCAST flag for bpf_redirect_map\n"
+"Usage: xdp_redirect_map_multi <IFINDEX|IFNAME> <IFINDEX|IFNAME> ... <IFINDEX|IFNAME>\n";
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <getopt.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <linux/if_ether.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+#include "xdp_redirect_map_multi.skel.h"
+
+#define MAX_IFACE_NUM 32
+static int ifaces[MAX_IFACE_NUM] = {};
+
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
+ SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_SKIP_HEADING;
+
+DEFINE_SAMPLE_INIT(xdp_redirect_map_multi);
+
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "skb-mode", no_argument, NULL, 'S' },
+ { "force", no_argument, NULL, 'F' },
+ { "load-egress", no_argument, NULL, 'X' },
+ { "stats", no_argument, NULL, 's' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ {}
+};
+
+static int update_mac_map(struct bpf_map *map)
+{
+ int mac_map_fd = bpf_map__fd(map);
+ unsigned char mac_addr[6];
+ unsigned int ifindex;
+ int i, ret = -1;
+
+ for (i = 0; ifaces[i] > 0; i++) {
+ ifindex = ifaces[i];
+
+ ret = get_mac_addr(ifindex, mac_addr);
+ if (ret < 0) {
+ fprintf(stderr, "get interface %d mac failed\n",
+ ifindex);
+ return ret;
+ }
+
+ ret = bpf_map_update_elem(mac_map_fd, &ifindex, mac_addr, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to update mac address for ifindex %d\n",
+ ifindex);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_devmap_val devmap_val = {};
+ struct xdp_redirect_map_multi *skel;
+ struct bpf_program *ingress_prog;
+ bool xdp_devmap_attached = false;
+ struct bpf_map *forward_map;
+ int ret = EXIT_FAIL_OPTION;
+ unsigned long interval = 2;
+ char ifname[IF_NAMESIZE];
+ unsigned int ifindex;
+ bool generic = false;
+ bool force = false;
+ bool tried = false;
+ bool error = true;
+ int i, opt;
+
+ while ((opt = getopt_long(argc, argv, "hSFXi:vs",
+ long_options, NULL)) != -1) {
+ switch (opt) {
+ case 'S':
+ generic = true;
+ /* devmap_xmit tracepoint not available */
+ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI);
+ break;
+ case 'F':
+ force = true;
+ break;
+ case 'X':
+ xdp_devmap_attached = true;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ break;
+ case 'v':
+ sample_switch_mode();
+ break;
+ case 's':
+ mask |= SAMPLE_REDIRECT_MAP_CNT;
+ break;
+ case 'h':
+ error = false;
+ default:
+ sample_usage(argv, long_options, __doc__, mask, error);
+ return ret;
+ }
+ }
+
+ if (argc <= optind + 1) {
+ sample_usage(argv, long_options, __doc__, mask, error);
+ return ret;
+ }
+
+ skel = xdp_redirect_map_multi__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_redirect_map_multi__open: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = EXIT_FAIL_OPTION;
+ for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
+ ifaces[i] = if_nametoindex(argv[optind + i]);
+ if (!ifaces[i])
+ ifaces[i] = strtoul(argv[optind + i], NULL, 0);
+ if (!if_indextoname(ifaces[i], ifname)) {
+ fprintf(stderr, "Bad interface index or name\n");
+ sample_usage(argv, long_options, __doc__, mask, true);
+ goto end_destroy;
+ }
+
+ skel->rodata->from_match[i] = ifaces[i];
+ skel->rodata->to_match[i] = ifaces[i];
+ }
+
+ ret = xdp_redirect_map_multi__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_redirect_map_multi__load: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ if (xdp_devmap_attached) {
+ /* Update mac_map with all egress interfaces' mac addr */
+ if (update_mac_map(skel->maps.mac_map) < 0) {
+ fprintf(stderr, "Updating mac address failed\n");
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ ingress_prog = skel->progs.xdp_redirect_map_native;
+ forward_map = skel->maps.forward_map_native;
+
+ for (i = 0; ifaces[i] > 0; i++) {
+ ifindex = ifaces[i];
+
+ ret = EXIT_FAIL_XDP;
+restart:
+ /* bind prog_fd to each interface */
+ if (sample_install_xdp(ingress_prog, ifindex, generic, force) < 0) {
+ if (generic && !tried) {
+ fprintf(stderr,
+ "Trying fallback to sizeof(int) as value_size for devmap in generic mode\n");
+ ingress_prog = skel->progs.xdp_redirect_map_general;
+ forward_map = skel->maps.forward_map_general;
+ tried = true;
+ goto restart;
+ }
+ goto end_destroy;
+ }
+
+ /* Add all the interfaces to the forward group, and attach
+ * the egress devmap program if it exists.
+ */
+ devmap_val.ifindex = ifindex;
+ if (xdp_devmap_attached)
+ devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_devmap_prog);
+ ret = bpf_map_update_elem(bpf_map__fd(forward_map), &ifindex, &devmap_val, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to update devmap value: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+ }
+
+ ret = sample_run(interval, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ ret = EXIT_OK;
+end_destroy:
+ xdp_redirect_map_multi__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c
new file mode 100644
index 000000000..c889a1394
--- /dev/null
+++ b/samples/bpf/xdp_redirect_map_user.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
+ */
+static const char *__doc__ =
+"XDP redirect tool, using BPF_MAP_TYPE_DEVMAP\n"
+"Usage: xdp_redirect_map <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n";
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <getopt.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+#include "xdp_redirect_map.skel.h"
+
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
+ SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
+
+DEFINE_SAMPLE_INIT(xdp_redirect_map);
+
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "skb-mode", no_argument, NULL, 'S' },
+ { "force", no_argument, NULL, 'F' },
+ { "load-egress", no_argument, NULL, 'X' },
+ { "stats", no_argument, NULL, 's' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ {}
+};
+
+static int verbose = 0;
+
+int main(int argc, char **argv)
+{
+ struct bpf_devmap_val devmap_val = {};
+ bool xdp_devmap_attached = false;
+ struct xdp_redirect_map *skel;
+ char str[2 * IF_NAMESIZE + 1];
+ char ifname_out[IF_NAMESIZE];
+ struct bpf_map *tx_port_map;
+ char ifname_in[IF_NAMESIZE];
+ int ifindex_in, ifindex_out;
+ unsigned long interval = 2;
+ int ret = EXIT_FAIL_OPTION;
+ struct bpf_program *prog;
+ bool generic = false;
+ bool force = false;
+ bool tried = false;
+ bool error = true;
+ int opt, key = 0;
+
+ while ((opt = getopt_long(argc, argv, "hSFXi:vs",
+ long_options, NULL)) != -1) {
+ switch (opt) {
+ case 'S':
+ generic = true;
+ /* devmap_xmit tracepoint not available */
+ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI);
+ break;
+ case 'F':
+ force = true;
+ break;
+ case 'X':
+ xdp_devmap_attached = true;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ break;
+ case 'v':
+ sample_switch_mode();
+ verbose = 1;
+ break;
+ case 's':
+ mask |= SAMPLE_REDIRECT_MAP_CNT;
+ break;
+ case 'h':
+ error = false;
+ default:
+ sample_usage(argv, long_options, __doc__, mask, error);
+ return ret;
+ }
+ }
+
+ if (argc <= optind + 1) {
+ sample_usage(argv, long_options, __doc__, mask, true);
+ goto end;
+ }
+
+ ifindex_in = if_nametoindex(argv[optind]);
+ if (!ifindex_in)
+ ifindex_in = strtoul(argv[optind], NULL, 0);
+
+ ifindex_out = if_nametoindex(argv[optind + 1]);
+ if (!ifindex_out)
+ ifindex_out = strtoul(argv[optind + 1], NULL, 0);
+
+ if (!ifindex_in || !ifindex_out) {
+ fprintf(stderr, "Bad interface index or name\n");
+ sample_usage(argv, long_options, __doc__, mask, true);
+ goto end;
+ }
+
+ skel = xdp_redirect_map__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_redirect_map__open: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ /* Load 2nd xdp prog on egress. */
+ if (xdp_devmap_attached) {
+ ret = get_mac_addr(ifindex_out, skel->rodata->tx_mac_addr);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to get interface %d mac address: %s\n",
+ ifindex_out, strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ if (verbose)
+ printf("Egress ifindex:%d using src MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ ifindex_out,
+ skel->rodata->tx_mac_addr[0], skel->rodata->tx_mac_addr[1],
+ skel->rodata->tx_mac_addr[2], skel->rodata->tx_mac_addr[3],
+ skel->rodata->tx_mac_addr[4], skel->rodata->tx_mac_addr[5]);
+ }
+
+ skel->rodata->from_match[0] = ifindex_in;
+ skel->rodata->to_match[0] = ifindex_out;
+
+ ret = xdp_redirect_map__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_redirect_map__load: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ prog = skel->progs.xdp_redirect_map_native;
+ tx_port_map = skel->maps.tx_port_native;
+restart:
+ if (sample_install_xdp(prog, ifindex_in, generic, force) < 0) {
+ /* First try with struct bpf_devmap_val as value for generic
+ * mode, then fall back to sizeof(int) for older kernels.
+ */
+ if (generic && !tried) {
+ fprintf(stderr,
+ "Trying fallback to sizeof(int) as value_size for devmap in generic mode\n");
+ prog = skel->progs.xdp_redirect_map_general;
+ tx_port_map = skel->maps.tx_port_general;
+ tried = true;
+ goto restart;
+ }
+ ret = EXIT_FAIL_XDP;
+ goto end_destroy;
+ }
+
+ /* Loading dummy XDP prog on out-device */
+ sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out, generic, force);
+
+ devmap_val.ifindex = ifindex_out;
+ if (xdp_devmap_attached)
+ devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_map_egress);
+ ret = bpf_map_update_elem(bpf_map__fd(tx_port_map), &key, &devmap_val, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to update devmap value: %s\n",
+ strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = EXIT_FAIL;
+ if (!if_indextoname(ifindex_in, ifname_in)) {
+ fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in,
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ if (!if_indextoname(ifindex_out, ifname_out)) {
+ fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out,
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str));
+ printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n",
+ ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out));
+ snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out);
+
+ ret = sample_run(interval, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ ret = EXIT_OK;
+end_destroy:
+ xdp_redirect_map__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
new file mode 100644
index 000000000..8663dd631
--- /dev/null
+++ b/samples/bpf/xdp_redirect_user.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
+ */
+static const char *__doc__ =
+"XDP redirect tool, using bpf_redirect helper\n"
+"Usage: xdp_redirect <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n";
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <net/if.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <getopt.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+#include "xdp_redirect.skel.h"
+
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_CNT |
+ SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
+
+DEFINE_SAMPLE_INIT(xdp_redirect);
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"skb-mode", no_argument, NULL, 'S' },
+ {"force", no_argument, NULL, 'F' },
+ {"stats", no_argument, NULL, 's' },
+ {"interval", required_argument, NULL, 'i' },
+ {"verbose", no_argument, NULL, 'v' },
+ {}
+};
+
+int main(int argc, char **argv)
+{
+ int ifindex_in, ifindex_out, opt;
+ char str[2 * IF_NAMESIZE + 1];
+ char ifname_out[IF_NAMESIZE];
+ char ifname_in[IF_NAMESIZE];
+ int ret = EXIT_FAIL_OPTION;
+ unsigned long interval = 2;
+ struct xdp_redirect *skel;
+ bool generic = false;
+ bool force = false;
+ bool error = true;
+
+ while ((opt = getopt_long(argc, argv, "hSFi:vs",
+ long_options, NULL)) != -1) {
+ switch (opt) {
+ case 'S':
+ generic = true;
+ mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI);
+ break;
+ case 'F':
+ force = true;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ break;
+ case 'v':
+ sample_switch_mode();
+ break;
+ case 's':
+ mask |= SAMPLE_REDIRECT_CNT;
+ break;
+ case 'h':
+ error = false;
+ default:
+ sample_usage(argv, long_options, __doc__, mask, error);
+ return ret;
+ }
+ }
+
+ if (argc <= optind + 1) {
+ sample_usage(argv, long_options, __doc__, mask, true);
+ return ret;
+ }
+
+ ifindex_in = if_nametoindex(argv[optind]);
+ if (!ifindex_in)
+ ifindex_in = strtoul(argv[optind], NULL, 0);
+
+ ifindex_out = if_nametoindex(argv[optind + 1]);
+ if (!ifindex_out)
+ ifindex_out = strtoul(argv[optind + 1], NULL, 0);
+
+ if (!ifindex_in || !ifindex_out) {
+ fprintf(stderr, "Bad interface index or name\n");
+ sample_usage(argv, long_options, __doc__, mask, true);
+ goto end;
+ }
+
+ skel = xdp_redirect__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_redirect__open: %s\n", strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ skel->rodata->from_match[0] = ifindex_in;
+ skel->rodata->to_match[0] = ifindex_out;
+ skel->rodata->ifindex_out = ifindex_out;
+
+ ret = xdp_redirect__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_redirect__load: %s\n", strerror(errno));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ ret = EXIT_FAIL_XDP;
+ if (sample_install_xdp(skel->progs.xdp_redirect_prog, ifindex_in,
+ generic, force) < 0)
+ goto end_destroy;
+
+ /* Loading dummy XDP prog on out-device */
+ sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out,
+ generic, force);
+
+ ret = EXIT_FAIL;
+ if (!if_indextoname(ifindex_in, ifname_in)) {
+ fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in,
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ if (!if_indextoname(ifindex_out, ifname_out)) {
+ fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out,
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str));
+ printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n",
+ ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out));
+ snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out);
+
+ ret = sample_run(interval, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+ ret = EXIT_OK;
+end_destroy:
+ xdp_redirect__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_router_ipv4.bpf.c b/samples/bpf/xdp_router_ipv4.bpf.c
new file mode 100644
index 000000000..0643330d1
--- /dev/null
+++ b/samples/bpf/xdp_router_ipv4.bpf.c
@@ -0,0 +1,189 @@
+/* Copyright (C) 2017 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include "vmlinux.h"
+#include "xdp_sample.bpf.h"
+#include "xdp_sample_shared.h"
+
+#define ETH_ALEN 6
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+
+struct trie_value {
+ __u8 prefix[4];
+ __be64 value;
+ int ifindex;
+ int metric;
+ __be32 gw;
+};
+
+/* Key for lpm_trie */
+union key_4 {
+ u32 b32[2];
+ u8 b8[8];
+};
+
+struct arp_entry {
+ __be64 mac;
+ __be32 dst;
+};
+
+struct direct_map {
+ struct arp_entry arp;
+ int ifindex;
+ __be64 mac;
+};
+
+/* Map for trie implementation */
+struct {
+ __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+ __uint(key_size, 8);
+ __uint(value_size, sizeof(struct trie_value));
+ __uint(max_entries, 50);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_map SEC(".maps");
+
+/* Map for ARP table */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, __be64);
+ __uint(max_entries, 50);
+} arp_table SEC(".maps");
+
+/* Map to keep the exact match entries in the route table */
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __be32);
+ __type(value, struct direct_map);
+ __uint(max_entries, 50);
+} exact_match SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_DEVMAP);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(max_entries, 100);
+} tx_port SEC(".maps");
+
+SEC("xdp")
+int xdp_router_ipv4_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *eth = data;
+ u64 nh_off = sizeof(*eth);
+ struct datarec *rec;
+ __be16 h_proto;
+ u32 key = 0;
+
+ rec = bpf_map_lookup_elem(&rx_cnt, &key);
+ if (rec)
+ NO_TEAR_INC(rec->processed);
+
+ if (data + nh_off > data_end)
+ goto drop;
+
+ h_proto = eth->h_proto;
+ if (h_proto == bpf_htons(ETH_P_8021Q) ||
+ h_proto == bpf_htons(ETH_P_8021AD)) {
+ struct vlan_hdr *vhdr;
+
+ vhdr = data + nh_off;
+ nh_off += sizeof(struct vlan_hdr);
+ if (data + nh_off > data_end)
+ goto drop;
+
+ h_proto = vhdr->h_vlan_encapsulated_proto;
+ }
+
+ switch (bpf_ntohs(h_proto)) {
+ case ETH_P_ARP:
+ if (rec)
+ NO_TEAR_INC(rec->xdp_pass);
+ return XDP_PASS;
+ case ETH_P_IP: {
+ struct iphdr *iph = data + nh_off;
+ struct direct_map *direct_entry;
+ __be64 *dest_mac, *src_mac;
+ int forward_to;
+
+ if (iph + 1 > data_end)
+ goto drop;
+
+ direct_entry = bpf_map_lookup_elem(&exact_match, &iph->daddr);
+
+		/* Check for an exact match first; it is faster than the LPM lookup */
+ if (direct_entry && direct_entry->mac &&
+ direct_entry->arp.mac) {
+ src_mac = &direct_entry->mac;
+ dest_mac = &direct_entry->arp.mac;
+ forward_to = direct_entry->ifindex;
+ } else {
+ struct trie_value *prefix_value;
+ union key_4 key4;
+
+ /* Look up in the trie for lpm */
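+			/* Key layout matches struct bpf_lpm_trie_key:
+			 * b32[0] holds the prefix length in bits, while
+			 * b8[4..7] hold the IPv4 address in network byte
+			 * order (daddr is big-endian, so its low byte on a
+			 * little-endian host is the first address octet).
+			 */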
+ key4.b32[0] = 32;
+ key4.b8[4] = iph->daddr & 0xff;
+ key4.b8[5] = (iph->daddr >> 8) & 0xff;
+ key4.b8[6] = (iph->daddr >> 16) & 0xff;
+ key4.b8[7] = (iph->daddr >> 24) & 0xff;
+
+ prefix_value = bpf_map_lookup_elem(&lpm_map, &key4);
+ if (!prefix_value)
+ goto drop;
+
+			forward_to = prefix_value->ifindex;
+			src_mac = &prefix_value->value;
+
+ dest_mac = bpf_map_lookup_elem(&arp_table, &iph->daddr);
+ if (!dest_mac) {
+ if (!prefix_value->gw)
+ goto drop;
+
+ dest_mac = bpf_map_lookup_elem(&arp_table,
+ &prefix_value->gw);
+ if (!dest_mac) {
+ /* Forward the packet to the kernel in
+ * order to trigger ARP discovery for
+ * the default gw.
+ */
+ if (rec)
+ NO_TEAR_INC(rec->xdp_pass);
+ return XDP_PASS;
+ }
+ }
+ }
+
+ if (src_mac && dest_mac) {
+ int ret;
+
+ __builtin_memcpy(eth->h_dest, dest_mac, ETH_ALEN);
+ __builtin_memcpy(eth->h_source, src_mac, ETH_ALEN);
+
+ ret = bpf_redirect_map(&tx_port, forward_to, 0);
+ if (ret == XDP_REDIRECT) {
+ if (rec)
+ NO_TEAR_INC(rec->xdp_redirect);
+ return ret;
+ }
+ }
+ }
+ default:
+ break;
+ }
+drop:
+ if (rec)
+ NO_TEAR_INC(rec->xdp_drop);
+
+ return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
new file mode 100644
index 000000000..683913bbf
--- /dev/null
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2017 Cavium, Inc.
+ */
+#include <linux/bpf.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <arpa/inet.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <net/if.h>
+#include <netdb.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include "bpf_util.h"
+#include <bpf/libbpf.h>
+#include <libgen.h>
+#include <getopt.h>
+#include <pthread.h>
+#include "xdp_sample_user.h"
+#include "xdp_router_ipv4.skel.h"
+
+static const char *__doc__ =
+"XDP IPv4 router implementation\n"
+"Usage: xdp_router_ipv4 <IFNAME-0> ... <IFNAME-N>\n";
+
+static char buf[8192];
+static int lpm_map_fd;
+static int arp_table_map_fd;
+static int exact_match_map_fd;
+static int tx_port_map_fd;
+
+static bool routes_thread_exit;
+static int interval = 5;
+
+static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_EXCEPTION_CNT;
+
+DEFINE_SAMPLE_INIT(xdp_router_ipv4);
+
+static const struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "skb-mode", no_argument, NULL, 'S' },
+ { "force", no_argument, NULL, 'F' },
+ { "interval", required_argument, NULL, 'i' },
+ { "verbose", no_argument, NULL, 'v' },
+ { "stats", no_argument, NULL, 's' },
+ {}
+};
+
+static int get_route_table(int rtm_family);
+
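+/* Receive a (possibly multipart) netlink reply into buf. Dump replies are
+ * accumulated until NLMSG_DONE; for the event subscriptions (RTMGRP_NEIGH,
+ * RTMGRP_IPV4_ROUTE) a single read is taken. Returns the total number of
+ * bytes received, or a negative value on error.
+ */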
+static int recv_msg(struct sockaddr_nl sock_addr, int sock)
+{
+ struct nlmsghdr *nh;
+ int len, nll = 0;
+ char *buf_ptr;
+
+ buf_ptr = buf;
+ while (1) {
+ len = recv(sock, buf_ptr, sizeof(buf) - nll, 0);
+ if (len < 0)
+ return len;
+
+ nh = (struct nlmsghdr *)buf_ptr;
+
+ if (nh->nlmsg_type == NLMSG_DONE)
+ break;
+ buf_ptr += len;
+ nll += len;
+ if ((sock_addr.nl_groups & RTMGRP_NEIGH) == RTMGRP_NEIGH)
+ break;
+
+ if ((sock_addr.nl_groups & RTMGRP_IPV4_ROUTE) == RTMGRP_IPV4_ROUTE)
+ break;
+ }
+ return nll;
+}
+
+/* Parse the route entries returned by netlink and update the
+ * related map entries.
+ */
+static void read_route(struct nlmsghdr *nh, int nll)
+{
+	/* Zero-initialize: the atoi() consumers below may run even when an
+	 * attribute was absent from the message.
+	 */
+	char dsts[24] = {}, gws[24] = {}, ifs[16] = {};
+	char dsts_len[24] = {}, metrics[24] = {};
+ struct bpf_lpm_trie_key *prefix_key;
+ struct rtattr *rt_attr;
+ struct rtmsg *rt_msg;
+ int rtm_family;
+ int rtl;
+ int i;
+ struct route_table {
+ int dst_len, iface, metric;
+ __be32 dst, gw;
+ __be64 mac;
+ } route;
+ struct arp_table {
+ __be64 mac;
+ __be32 dst;
+ };
+
+ struct direct_map {
+ struct arp_table arp;
+ int ifindex;
+ __be64 mac;
+ } direct_entry;
+
+ memset(&route, 0, sizeof(route));
+ for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
+ rt_msg = (struct rtmsg *)NLMSG_DATA(nh);
+ rtm_family = rt_msg->rtm_family;
+ if (rtm_family == AF_INET)
+ if (rt_msg->rtm_table != RT_TABLE_MAIN)
+ continue;
+ rt_attr = (struct rtattr *)RTM_RTA(rt_msg);
+ rtl = RTM_PAYLOAD(nh);
+
+ for (; RTA_OK(rt_attr, rtl); rt_attr = RTA_NEXT(rt_attr, rtl)) {
+ switch (rt_attr->rta_type) {
+			case RTA_DST:
+ sprintf(dsts, "%u",
+ (*((__be32 *)RTA_DATA(rt_attr))));
+ break;
+ case RTA_GATEWAY:
+ sprintf(gws, "%u",
+ *((__be32 *)RTA_DATA(rt_attr)));
+ break;
+ case RTA_OIF:
+ sprintf(ifs, "%u",
+ *((int *)RTA_DATA(rt_attr)));
+ break;
+			case RTA_METRICS:
+				sprintf(metrics, "%u",
+					*((int *)RTA_DATA(rt_attr)));
+				break;
+ default:
+ break;
+ }
+ }
+ sprintf(dsts_len, "%d", rt_msg->rtm_dst_len);
+ route.dst = atoi(dsts);
+ route.dst_len = atoi(dsts_len);
+ route.gw = atoi(gws);
+ route.iface = atoi(ifs);
+ route.metric = atoi(metrics);
+ assert(get_mac_addr(route.iface, &route.mac) == 0);
+ assert(bpf_map_update_elem(tx_port_map_fd,
+ &route.iface, &route.iface, 0) == 0);
+ if (rtm_family == AF_INET) {
+ struct trie_value {
+ __u8 prefix[4];
+ __be64 value;
+ int ifindex;
+ int metric;
+ __be32 gw;
+ } *prefix_value;
+
+			/* room for prefixlen plus 4 bytes of IPv4 data */
+			prefix_key = alloca(sizeof(*prefix_key) + 4);
+ prefix_value = alloca(sizeof(*prefix_value));
+
+			prefix_key->prefixlen = route.dst_len;
+ direct_entry.mac = route.mac & 0xffffffffffff;
+ direct_entry.ifindex = route.iface;
+ direct_entry.arp.mac = 0;
+ direct_entry.arp.dst = 0;
+ if (route.dst_len == 32) {
+ if (nh->nlmsg_type == RTM_DELROUTE) {
+ assert(bpf_map_delete_elem(exact_match_map_fd,
+ &route.dst) == 0);
+ } else {
+ if (bpf_map_lookup_elem(arp_table_map_fd,
+ &route.dst,
+ &direct_entry.arp.mac) == 0)
+ direct_entry.arp.dst = route.dst;
+ assert(bpf_map_update_elem(exact_match_map_fd,
+ &route.dst,
+ &direct_entry, 0) == 0);
+ }
+ }
+ for (i = 0; i < 4; i++)
+ prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
+
+ if (bpf_map_lookup_elem(lpm_map_fd, prefix_key,
+ prefix_value) < 0) {
+ for (i = 0; i < 4; i++)
+ prefix_value->prefix[i] = prefix_key->data[i];
+ prefix_value->value = route.mac & 0xffffffffffff;
+ prefix_value->ifindex = route.iface;
+ prefix_value->gw = route.gw;
+ prefix_value->metric = route.metric;
+
+ assert(bpf_map_update_elem(lpm_map_fd,
+ prefix_key,
+ prefix_value, 0
+ ) == 0);
+ } else {
+ if (nh->nlmsg_type == RTM_DELROUTE) {
+ assert(bpf_map_delete_elem(lpm_map_fd,
+ prefix_key
+ ) == 0);
+ /* Rereading the route table to check if
+ * there is an entry with the same
+ * prefix but a different metric as the
+ * deleted entry.
+ */
+ get_route_table(AF_INET);
+ } else if (prefix_key->data[0] ==
+ prefix_value->prefix[0] &&
+ prefix_key->data[1] ==
+ prefix_value->prefix[1] &&
+ prefix_key->data[2] ==
+ prefix_value->prefix[2] &&
+ prefix_key->data[3] ==
+ prefix_value->prefix[3] &&
+ route.metric >= prefix_value->metric) {
+ continue;
+ } else {
+ for (i = 0; i < 4; i++)
+ prefix_value->prefix[i] =
+ prefix_key->data[i];
+ prefix_value->value =
+ route.mac & 0xffffffffffff;
+ prefix_value->ifindex = route.iface;
+ prefix_value->gw = route.gw;
+ prefix_value->metric = route.metric;
+ assert(bpf_map_update_elem(lpm_map_fd,
+ prefix_key,
+ prefix_value,
+ 0) == 0);
+ }
+ }
+ }
+		memset(&route, 0, sizeof(route));
+		memset(dsts, 0, sizeof(dsts));
+		memset(dsts_len, 0, sizeof(dsts_len));
+		memset(gws, 0, sizeof(gws));
+		memset(ifs, 0, sizeof(ifs));
+		memset(metrics, 0, sizeof(metrics));
+ }
+}
+
+/* Read the existing route table when the process is launched. */
+static int get_route_table(int rtm_family)
+{
+ struct sockaddr_nl sa;
+ struct nlmsghdr *nh;
+ int sock, seq = 0;
+ struct msghdr msg;
+ struct iovec iov;
+ int ret = 0;
+ int nll;
+
+ struct {
+ struct nlmsghdr nl;
+ struct rtmsg rt;
+ char buf[8192];
+ } req;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0) {
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return -errno;
+ }
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ ret = -errno;
+ goto cleanup;
+ }
+ memset(&req, 0, sizeof(req));
+ req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
+ req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ req.nl.nlmsg_type = RTM_GETROUTE;
+
+ req.rt.rtm_family = rtm_family;
+ req.rt.rtm_table = RT_TABLE_MAIN;
+ req.nl.nlmsg_pid = 0;
+ req.nl.nlmsg_seq = ++seq;
+ memset(&msg, 0, sizeof(msg));
+ iov.iov_base = (void *)&req.nl;
+ iov.iov_len = req.nl.nlmsg_len;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ ret = sendmsg(sock, &msg, 0);
+ if (ret < 0) {
+ fprintf(stderr, "send to netlink: %s\n", strerror(errno));
+ ret = -errno;
+ goto cleanup;
+ }
+ memset(buf, 0, sizeof(buf));
+ nll = recv_msg(sa, sock);
+ if (nll < 0) {
+		fprintf(stderr, "recv from netlink: %s\n", strerror(errno));
+ ret = nll;
+ goto cleanup;
+ }
+ nh = (struct nlmsghdr *)buf;
+ read_route(nh, nll);
+cleanup:
+ close(sock);
+ return ret;
+}
+
+/* Parse the ARP entries returned by netlink and update the
+ * related map entries.
+ */
+static void read_arp(struct nlmsghdr *nh, int nll)
+{
+ struct rtattr *rt_attr;
+	char dsts[24] = {}, mac[24] = {};
+ struct ndmsg *rt_msg;
+ int rtl, ndm_family;
+
+ struct arp_table {
+ __be64 mac;
+ __be32 dst;
+ } arp_entry;
+ struct direct_map {
+ struct arp_table arp;
+ int ifindex;
+ __be64 mac;
+ } direct_entry;
+
+ for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) {
+ rt_msg = (struct ndmsg *)NLMSG_DATA(nh);
+ rt_attr = (struct rtattr *)RTM_RTA(rt_msg);
+ ndm_family = rt_msg->ndm_family;
+ rtl = RTM_PAYLOAD(nh);
+ for (; RTA_OK(rt_attr, rtl); rt_attr = RTA_NEXT(rt_attr, rtl)) {
+ switch (rt_attr->rta_type) {
+ case NDA_DST:
+ sprintf(dsts, "%u",
+ *((__be32 *)RTA_DATA(rt_attr)));
+ break;
+ case NDA_LLADDR:
+ sprintf(mac, "%lld",
+ *((__be64 *)RTA_DATA(rt_attr)));
+ break;
+ default:
+ break;
+ }
+ }
+ arp_entry.dst = atoi(dsts);
+ arp_entry.mac = atol(mac);
+
+ if (ndm_family == AF_INET) {
+ if (bpf_map_lookup_elem(exact_match_map_fd,
+ &arp_entry.dst,
+ &direct_entry) == 0) {
+ if (nh->nlmsg_type == RTM_DELNEIGH) {
+ direct_entry.arp.dst = 0;
+ direct_entry.arp.mac = 0;
+ } else if (nh->nlmsg_type == RTM_NEWNEIGH) {
+ direct_entry.arp.dst = arp_entry.dst;
+ direct_entry.arp.mac = arp_entry.mac;
+ }
+ assert(bpf_map_update_elem(exact_match_map_fd,
+ &arp_entry.dst,
+ &direct_entry, 0
+ ) == 0);
+ memset(&direct_entry, 0, sizeof(direct_entry));
+ }
+ if (nh->nlmsg_type == RTM_DELNEIGH) {
+ assert(bpf_map_delete_elem(arp_table_map_fd,
+ &arp_entry.dst) == 0);
+ } else if (nh->nlmsg_type == RTM_NEWNEIGH) {
+ assert(bpf_map_update_elem(arp_table_map_fd,
+ &arp_entry.dst,
+ &arp_entry.mac, 0
+ ) == 0);
+ }
+ }
+		memset(&arp_entry, 0, sizeof(arp_entry));
+		memset(dsts, 0, sizeof(dsts));
+		memset(mac, 0, sizeof(mac));
+ }
+}
+
+/* Read the existing ARP table when the process is launched. */
+static int get_arp_table(int rtm_family)
+{
+ struct sockaddr_nl sa;
+ struct nlmsghdr *nh;
+ int sock, seq = 0;
+ struct msghdr msg;
+ struct iovec iov;
+ int ret = 0;
+ int nll;
+ struct {
+ struct nlmsghdr nl;
+ struct ndmsg rt;
+ char buf[8192];
+ } req;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0) {
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return -errno;
+ }
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ ret = -errno;
+ goto cleanup;
+ }
+ memset(&req, 0, sizeof(req));
+	req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
+ req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ req.nl.nlmsg_type = RTM_GETNEIGH;
+ req.rt.ndm_state = NUD_REACHABLE;
+ req.rt.ndm_family = rtm_family;
+ req.nl.nlmsg_pid = 0;
+ req.nl.nlmsg_seq = ++seq;
+ memset(&msg, 0, sizeof(msg));
+ iov.iov_base = (void *)&req.nl;
+ iov.iov_len = req.nl.nlmsg_len;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ ret = sendmsg(sock, &msg, 0);
+ if (ret < 0) {
+ fprintf(stderr, "send to netlink: %s\n", strerror(errno));
+ ret = -errno;
+ goto cleanup;
+ }
+ memset(buf, 0, sizeof(buf));
+ nll = recv_msg(sa, sock);
+ if (nll < 0) {
+		fprintf(stderr, "recv from netlink: %s\n", strerror(errno));
+ ret = nll;
+ goto cleanup;
+ }
+ nh = (struct nlmsghdr *)buf;
+ read_arp(nh, nll);
+cleanup:
+ close(sock);
+ return ret;
+}
+
+/* Track changes in the route and ARP tables and keep the related map
+ * entries up to date. Runs until routes_thread_exit is set.
+ */
+static void *monitor_routes_thread(void *arg)
+{
+ struct pollfd fds_route, fds_arp;
+ struct sockaddr_nl la, lr;
+ int sock, sock_arp, nll;
+ struct nlmsghdr *nh;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0) {
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ return NULL;
+ }
+
+ fcntl(sock, F_SETFL, O_NONBLOCK);
+ memset(&lr, 0, sizeof(lr));
+ lr.nl_family = AF_NETLINK;
+ lr.nl_groups = RTMGRP_IPV6_ROUTE | RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY;
+ if (bind(sock, (struct sockaddr *)&lr, sizeof(lr)) < 0) {
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ close(sock);
+ return NULL;
+ }
+
+ fds_route.fd = sock;
+ fds_route.events = POLL_IN;
+
+ sock_arp = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock_arp < 0) {
+ fprintf(stderr, "open netlink socket: %s\n", strerror(errno));
+ close(sock);
+ return NULL;
+ }
+
+ fcntl(sock_arp, F_SETFL, O_NONBLOCK);
+ memset(&la, 0, sizeof(la));
+ la.nl_family = AF_NETLINK;
+ la.nl_groups = RTMGRP_NEIGH | RTMGRP_NOTIFY;
+ if (bind(sock_arp, (struct sockaddr *)&la, sizeof(la)) < 0) {
+ fprintf(stderr, "bind netlink socket: %s\n", strerror(errno));
+ goto cleanup;
+ }
+
+ fds_arp.fd = sock_arp;
+ fds_arp.events = POLL_IN;
+
+ /* dump route and arp tables */
+ if (get_arp_table(AF_INET) < 0) {
+ fprintf(stderr, "Failed reading arp table\n");
+ goto cleanup;
+ }
+
+ if (get_route_table(AF_INET) < 0) {
+ fprintf(stderr, "Failed reading route table\n");
+ goto cleanup;
+ }
+
+ while (!routes_thread_exit) {
+ memset(buf, 0, sizeof(buf));
+ if (poll(&fds_route, 1, 3) == POLL_IN) {
+ nll = recv_msg(lr, sock);
+ if (nll < 0) {
+				fprintf(stderr, "recv from netlink: %s\n",
+					strerror(errno));
+ goto cleanup;
+ }
+
+ nh = (struct nlmsghdr *)buf;
+ read_route(nh, nll);
+ }
+
+ memset(buf, 0, sizeof(buf));
+ if (poll(&fds_arp, 1, 3) == POLL_IN) {
+ nll = recv_msg(la, sock_arp);
+ if (nll < 0) {
+				fprintf(stderr, "recv from netlink: %s\n",
+					strerror(errno));
+ goto cleanup;
+ }
+
+ nh = (struct nlmsghdr *)buf;
+ read_arp(nh, nll);
+ }
+
+ sleep(interval);
+ }
+
+cleanup:
+ close(sock_arp);
+ close(sock);
+ return NULL;
+}
+
+static void usage(char *argv[], const struct option *long_options,
+ const char *doc, int mask, bool error,
+ struct bpf_object *obj)
+{
+ sample_usage(argv, long_options, doc, mask, error);
+}
+
+int main(int argc, char **argv)
+{
+ bool error = true, generic = false, force = false;
+ int opt, ret = EXIT_FAIL_BPF;
+ struct xdp_router_ipv4 *skel;
+ int i, total_ifindex = argc - 1;
+ char **ifname_list = argv + 1;
+ pthread_t routes_thread;
+ int longindex = 0;
+
+ if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) {
+ fprintf(stderr, "Failed to set libbpf strict mode: %s\n",
+ strerror(errno));
+ goto end;
+ }
+
+ skel = xdp_router_ipv4__open();
+ if (!skel) {
+ fprintf(stderr, "Failed to xdp_router_ipv4__open: %s\n",
+ strerror(errno));
+ goto end;
+ }
+
+ ret = sample_init_pre_load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sample_init_pre_load: %s\n",
+ strerror(-ret));
+ ret = EXIT_FAIL_BPF;
+ goto end_destroy;
+ }
+
+ ret = xdp_router_ipv4__load(skel);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to xdp_router_ipv4__load: %s\n",
+ strerror(errno));
+ goto end_destroy;
+ }
+
+ ret = sample_init(skel, mask);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
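+	/* Interface names are positional arguments taken straight from argv,
+	 * so every consumed option (and option argument) shifts ifname_list
+	 * forward and shrinks total_ifindex. This assumes all options are
+	 * given before the interface names.
+	 */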
+ while ((opt = getopt_long(argc, argv, "si:SFvh",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 's':
+ mask |= SAMPLE_REDIRECT_MAP_CNT;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'i':
+ interval = strtoul(optarg, NULL, 0);
+ total_ifindex -= 2;
+ ifname_list += 2;
+ break;
+ case 'S':
+ generic = true;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'F':
+ force = true;
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'v':
+ sample_switch_mode();
+ total_ifindex--;
+ ifname_list++;
+ break;
+ case 'h':
+ error = false;
+ default:
+ usage(argv, long_options, __doc__, mask, error, skel->obj);
+ goto end_destroy;
+ }
+ }
+
+ ret = EXIT_FAIL_OPTION;
+ if (optind == argc) {
+ usage(argv, long_options, __doc__, mask, true, skel->obj);
+ goto end_destroy;
+ }
+
+ lpm_map_fd = bpf_map__fd(skel->maps.lpm_map);
+ if (lpm_map_fd < 0) {
+ fprintf(stderr, "Failed loading lpm_map %s\n",
+ strerror(-lpm_map_fd));
+ goto end_destroy;
+ }
+ arp_table_map_fd = bpf_map__fd(skel->maps.arp_table);
+ if (arp_table_map_fd < 0) {
+ fprintf(stderr, "Failed loading arp_table_map_fd %s\n",
+ strerror(-arp_table_map_fd));
+ goto end_destroy;
+ }
+ exact_match_map_fd = bpf_map__fd(skel->maps.exact_match);
+ if (exact_match_map_fd < 0) {
+ fprintf(stderr, "Failed loading exact_match_map_fd %s\n",
+ strerror(-exact_match_map_fd));
+ goto end_destroy;
+ }
+ tx_port_map_fd = bpf_map__fd(skel->maps.tx_port);
+ if (tx_port_map_fd < 0) {
+ fprintf(stderr, "Failed loading tx_port_map_fd %s\n",
+ strerror(-tx_port_map_fd));
+ goto end_destroy;
+ }
+
+ ret = EXIT_FAIL_XDP;
+ for (i = 0; i < total_ifindex; i++) {
+ int index = if_nametoindex(ifname_list[i]);
+
+ if (!index) {
+			fprintf(stderr, "Interface %s not found: %s\n",
+				ifname_list[i], strerror(errno));
+ goto end_destroy;
+ }
+ if (sample_install_xdp(skel->progs.xdp_router_ipv4_prog,
+ index, generic, force) < 0)
+ goto end_destroy;
+ }
+
+ ret = pthread_create(&routes_thread, NULL, monitor_routes_thread, NULL);
+ if (ret) {
+		fprintf(stderr, "Failed creating routes_thread: %s\n", strerror(ret));
+ ret = EXIT_FAIL;
+ goto end_destroy;
+ }
+
+ ret = sample_run(interval, NULL, NULL);
+ routes_thread_exit = true;
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
+ ret = EXIT_FAIL;
+ goto end_thread_wait;
+ }
+ ret = EXIT_OK;
+
+end_thread_wait:
+ pthread_join(routes_thread, NULL);
+end_destroy:
+ xdp_router_ipv4__destroy(skel);
+end:
+ sample_exit(ret);
+}
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644
index 000000000..5e7459f9b
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to extract XDP RX-queue info
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in.h>
+#include <bpf/bpf_helpers.h>
+
+/* Config setup from userspace
+ *
+ * Userspace sets the ifindex in config_map so that the program can verify
+ * ctx->ingress_ifindex against the configured ifindex.
+ */
+struct config {
+ __u32 action;
+ int ifindex;
+ __u32 options;
+};
+enum cfg_options_flags {
+ NO_TOUCH = 0x0U,
+ READ_MEM = 0x1U,
+ SWAP_MAC = 0x2U,
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct config);
+ __uint(max_entries, 1);
+} config_map SEC(".maps");
+
+/* Common stats data record (shared with userspace) */
+struct datarec {
+ __u64 processed;
+ __u64 issue;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} stats_global_map SEC(".maps");
+
+#define MAX_RXQs 64
+
+/* Stats per rx_queue_index (per CPU) */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, MAX_RXQs + 1);
+} rx_queue_index_map SEC(".maps");
+
+static __always_inline
+void swap_src_dst_mac(void *data)
+{
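+	/* Treat the Ethernet header as six 16-bit words: words 0-2 are the
+	 * destination MAC and words 3-5 the source MAC; swap the two.
+	 */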
+ unsigned short *p = data;
+ unsigned short dst[3];
+
+ dst[0] = p[0];
+ dst[1] = p[1];
+ dst[2] = p[2];
+ p[0] = p[3];
+ p[1] = p[4];
+ p[2] = p[5];
+ p[3] = dst[0];
+ p[4] = dst[1];
+ p[5] = dst[2];
+}
+
+SEC("xdp_prog0")
+int xdp_prognum0(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct datarec *rec, *rxq_rec;
+ int ingress_ifindex;
+ struct config *config;
+ u32 key = 0;
+
+ /* Global stats record */
+ rec = bpf_map_lookup_elem(&stats_global_map, &key);
+ if (!rec)
+ return XDP_ABORTED;
+ rec->processed++;
+
+	/* Accessing ctx->ingress_ifindex causes the kernel to rewrite this
+	 * BPF instruction to access xdp_rxq->dev->ifindex directly.
+	 */
+ ingress_ifindex = ctx->ingress_ifindex;
+
+ config = bpf_map_lookup_elem(&config_map, &key);
+ if (!config)
+ return XDP_ABORTED;
+
+ /* Simple test: check ctx provided ifindex is as expected */
+ if (ingress_ifindex != config->ifindex) {
+ /* count this error case */
+ rec->issue++;
+ return XDP_ABORTED;
+ }
+
+ /* Update stats per rx_queue_index. Handle if rx_queue_index
+ * is larger than stats map can contain info for.
+ */
+ key = ctx->rx_queue_index;
+ if (key >= MAX_RXQs)
+ key = MAX_RXQs;
+ rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
+ if (!rxq_rec)
+ return XDP_ABORTED;
+ rxq_rec->processed++;
+ if (key == MAX_RXQs)
+ rxq_rec->issue++;
+
+ /* Default: Don't touch packet data, only count packets */
+ if (unlikely(config->options & (READ_MEM|SWAP_MAC))) {
+ struct ethhdr *eth = data;
+
+ if (eth + 1 > data_end)
+ return XDP_ABORTED;
+
+ /* Avoid compiler removing this: Drop non 802.3 Ethertypes */
+ if (ntohs(eth->h_proto) < ETH_P_802_3_MIN)
+ return XDP_ABORTED;
+
+ /* XDP_TX requires changing MAC-addrs, else HW may drop.
+ * Can also be enabled with --swapmac (for test purposes)
+ */
+ if (unlikely(config->options & SWAP_MAC))
+ swap_src_dst_mac(data);
+ }
+
+ return config->action;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644
index 000000000..08f5331d2
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ = " XDP RX-queue info extract example\n\n"
+ "Monitor how many packets per sec (pps) are received\n"
+ "per NIC RX queue index and which CPU processed the packet\n"
+ ;
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+#include <limits.h>
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "bpf_util.h"
+
+static int ifindex = -1;
+static char ifname_buf[IF_NAMESIZE];
+static char *ifname;
+static __u32 prog_id;
+
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+
+static struct bpf_map *stats_global_map;
+static struct bpf_map *rx_queue_index_map;
+
+/* Exit return codes */
+#define EXIT_OK 0
+#define EXIT_FAIL 1
+#define EXIT_FAIL_OPTION 2
+#define EXIT_FAIL_XDP 3
+#define EXIT_FAIL_BPF 4
+#define EXIT_FAIL_MEM 5
+
+#define FAIL_MEM_SIG INT_MAX
+#define FAIL_STAT_SIG (INT_MAX - 1)
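+/* int_exit() doubles as the common error path: invoking it with these
+ * sentinel "signal" values removes the XDP program before exiting with the
+ * matching failure code.
+ */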
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"dev", required_argument, NULL, 'd' },
+ {"skb-mode", no_argument, NULL, 'S' },
+ {"sec", required_argument, NULL, 's' },
+ {"no-separators", no_argument, NULL, 'z' },
+ {"action", required_argument, NULL, 'a' },
+ {"readmem", no_argument, NULL, 'r' },
+ {"swapmac", no_argument, NULL, 'm' },
+ {"force", no_argument, NULL, 'F' },
+ {0, 0, NULL, 0 }
+};
+
+static void int_exit(int sig)
+{
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
+ printf("bpf_xdp_query_id failed\n");
+ exit(EXIT_FAIL);
+ }
+ if (prog_id == curr_prog_id) {
+ fprintf(stderr,
+ "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+ ifindex, ifname);
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
+ } else if (!curr_prog_id) {
+			printf("couldn't find a prog id on the given iface\n");
+ } else {
+ printf("program on interface changed, not removing\n");
+ }
+ }
+
+ if (sig == FAIL_MEM_SIG)
+ exit(EXIT_FAIL_MEM);
+ else if (sig == FAIL_STAT_SIG)
+ exit(EXIT_FAIL);
+
+ exit(EXIT_OK);
+}
+
+struct config {
+ __u32 action;
+ int ifindex;
+ __u32 options;
+};
+enum cfg_options_flags {
+ NO_TOUCH = 0x0U,
+ READ_MEM = 0x1U,
+ SWAP_MAC = 0x2U,
+};
+#define XDP_ACTION_MAX (XDP_TX + 1)
+#define XDP_ACTION_MAX_STRLEN 11
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+ [XDP_ABORTED] = "XDP_ABORTED",
+ [XDP_DROP] = "XDP_DROP",
+ [XDP_PASS] = "XDP_PASS",
+ [XDP_TX] = "XDP_TX",
+};
+
+static const char *action2str(int action)
+{
+ if (action < XDP_ACTION_MAX)
+ return xdp_action_names[action];
+ return NULL;
+}
+
+static int parse_xdp_action(char *action_str)
+{
+	size_t maxlen;
+	int action = -1;
+	int i;
+
+ for (i = 0; i < XDP_ACTION_MAX; i++) {
+ maxlen = XDP_ACTION_MAX_STRLEN;
+ if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
+ action = i;
+ break;
+ }
+ }
+ return action;
+}
+
+static void list_xdp_actions(void)
+{
+ int i;
+
+ printf("Available XDP --action <options>\n");
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ printf("\t%s\n", xdp_action_names[i]);
+ printf("\n");
+}
+
+static char *options2str(enum cfg_options_flags flag)
+{
+ if (flag == NO_TOUCH)
+ return "no_touch";
+ if (flag & SWAP_MAC)
+ return "swapmac";
+ if (flag & READ_MEM)
+ return "read";
+	fprintf(stderr, "ERR: Unknown config option flags\n");
+ int_exit(FAIL_STAT_SIG);
+ return "unknown";
+}
+
+static void usage(char *argv[])
+{
+ int i;
+
+ printf("\nDOCUMENTATION:\n%s\n", __doc__);
+ printf(" Usage: %s (options-see-below)\n", argv[0]);
+ printf(" Listing options:\n");
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-12s", long_options[i].name);
+ if (long_options[i].flag != NULL)
+ printf(" flag (internal value:%d)",
+ *long_options[i].flag);
+ else
+ printf(" short-option: -%c",
+ long_options[i].val);
+ printf("\n");
+ }
+ printf("\n");
+ list_xdp_actions();
+}
+
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(void)
+{
+ struct timespec t;
+ int res;
+
+ res = clock_gettime(CLOCK_MONOTONIC, &t);
+ if (res < 0) {
+		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
+ int_exit(FAIL_STAT_SIG);
+ }
+ return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+/* Common stats data record shared with _kern.c */
+struct datarec {
+ __u64 processed;
+ __u64 issue;
+};
+struct record {
+ __u64 timestamp;
+ struct datarec total;
+ struct datarec *cpu;
+};
+struct stats_record {
+ struct record stats;
+ struct record *rxq;
+};
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec *array;
+
+ array = calloc(nr_cpus, sizeof(struct datarec));
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+ int_exit(FAIL_MEM_SIG);
+ }
+ return array;
+}
+
+static struct record *alloc_record_per_rxq(void)
+{
+ unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
+ struct record *array;
+
+ array = calloc(nr_rxqs, sizeof(struct record));
+ if (!array) {
+ fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+ int_exit(FAIL_MEM_SIG);
+ }
+ return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+ unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
+ struct stats_record *rec;
+ int i;
+
+ rec = calloc(1, sizeof(struct stats_record));
+ if (!rec) {
+ fprintf(stderr, "Mem alloc error\n");
+ int_exit(FAIL_MEM_SIG);
+ }
+ rec->rxq = alloc_record_per_rxq();
+ for (i = 0; i < nr_rxqs; i++)
+ rec->rxq[i].cpu = alloc_record_per_cpu();
+
+ rec->stats.cpu = alloc_record_per_cpu();
+ return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+ unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
+ int i;
+
+ for (i = 0; i < nr_rxqs; i++)
+ free(r->rxq[i].cpu);
+
+ free(r->rxq);
+ free(r->stats.cpu);
+ free(r);
+}
+
+static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
+{
+ /* For percpu maps, userspace gets a value per possible CPU */
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct datarec values[nr_cpus];
+ __u64 sum_processed = 0;
+ __u64 sum_issue = 0;
+ int i;
+
+ if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+ fprintf(stderr,
+ "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+ return false;
+ }
+ /* Get time as close as possible to reading map contents */
+ rec->timestamp = gettime();
+
+ /* Record and sum values from each CPU */
+ for (i = 0; i < nr_cpus; i++) {
+ rec->cpu[i].processed = values[i].processed;
+ sum_processed += values[i].processed;
+ rec->cpu[i].issue = values[i].issue;
+ sum_issue += values[i].issue;
+ }
+ rec->total.processed = sum_processed;
+ rec->total.issue = sum_issue;
+ return true;
+}
+
+static void stats_collect(struct stats_record *rec)
+{
+ int fd, i, max_rxqs;
+
+ fd = bpf_map__fd(stats_global_map);
+ map_collect_percpu(fd, 0, &rec->stats);
+
+ fd = bpf_map__fd(rx_queue_index_map);
+ max_rxqs = bpf_map__max_entries(rx_queue_index_map);
+ for (i = 0; i < max_rxqs; i++)
+ map_collect_percpu(fd, i, &rec->rxq[i]);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+ double period_ = 0;
+ __u64 period = 0;
+
+ period = r->timestamp - p->timestamp;
+ if (period > 0)
+ period_ = ((double) period / NANOSEC_PER_SEC);
+
+ return period_;
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->processed - p->processed;
+ pps = packets / period_;
+ }
+ return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r,
+ struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->issue - p->issue;
+ pps = packets / period_;
+ }
+ return pps;
+}
+
+static void stats_print(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ int action, __u32 cfg_opt)
+{
+ unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ double pps = 0, err = 0;
+ struct record *rec, *prev;
+ double t;
+ int rxq;
+ int i;
+
+ /* Header */
+ printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n",
+ ifname, ifindex, action2str(action), options2str(cfg_opt));
+
+ /* stats_global_map */
+ {
+ char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %-7s %'-11.0f %'-10.0f\n";
+ char *errstr = "";
+
+ printf("%-15s %-7s %-11s %-11s\n",
+ "XDP stats", "CPU", "pps", "issue-pps");
+
+ rec = &stats_rec->stats;
+ prev = &stats_prev->stats;
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (err > 0)
+ errstr = "invalid-ifindex";
+ if (pps > 0)
+ printf(fmt_rx, "XDP-RX CPU",
+ i, pps, err, errstr);
+ }
+		pps = calc_pps(&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+ printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
+ }
+
+ /* rx_queue_index_map */
+ printf("\n%-15s %-7s %-11s %-11s\n",
+ "RXQ stats", "RXQ:CPU", "pps", "issue-pps");
+
+ for (rxq = 0; rxq < nr_rxqs; rxq++) {
+ char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %3d:%-3s %'-11.0f %'-10.0f\n";
+ char *errstr = "";
+ int rxq_ = rxq;
+
+ /* Last RXQ in map catch overflows */
+ if (rxq_ == nr_rxqs - 1)
+ rxq_ = -1;
+
+ rec = &stats_rec->rxq[rxq];
+ prev = &stats_prev->rxq[rxq];
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (err > 0) {
+ if (rxq_ == -1)
+ errstr = "map-overflow-RXQ";
+ else
+ errstr = "err";
+ }
+ if (pps > 0)
+ printf(fmt_rx, "rx_queue_index",
+ rxq_, i, pps, err, errstr);
+ }
+		pps = calc_pps(&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+ if (pps || err)
+ printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
+ }
+}
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+ struct stats_record *tmp;
+
+ tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+static void stats_poll(int interval, int action, __u32 cfg_opt)
+{
+ struct stats_record *record, *prev;
+
+ record = alloc_stats_record();
+ prev = alloc_stats_record();
+ stats_collect(record);
+
+ while (1) {
+ swap(&prev, &record);
+ stats_collect(record);
+ stats_print(record, prev, action, cfg_opt);
+ sleep(interval);
+ }
+
+ free_stats_record(record);
+ free_stats_record(prev);
+}
+
+int main(int argc, char **argv)
+{
+	__u32 cfg_options = NO_TOUCH; /* Default: Don't touch packet memory */
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int prog_fd, map_fd, opt, err;
+ bool use_separators = true;
+ struct config cfg = { 0 };
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ char filename[256];
+ int longindex = 0;
+ int interval = 2;
+ __u32 key = 0;
+
+ char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
+ int action = XDP_PASS; /* Default action */
+ char *action_str = NULL;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return EXIT_FAIL;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return EXIT_FAIL;
+ prog_fd = bpf_program__fd(prog);
+
+ map = bpf_object__find_map_by_name(obj, "config_map");
+ stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
+ rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
+ if (!map || !stats_global_map || !rx_queue_index_map) {
+ printf("finding a map in obj file failed\n");
+ return EXIT_FAIL;
+ }
+ map_fd = bpf_map__fd(map);
+
+	if (prog_fd < 0) {
+		fprintf(stderr, "ERR: getting program fd failed: %s\n", strerror(errno));
+ return EXIT_FAIL;
+ }
+
+	/* Parse command line args */
+ while ((opt = getopt_long(argc, argv, "FhSrmzd:s:a:",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'd':
+ if (strlen(optarg) >= IF_NAMESIZE) {
+ fprintf(stderr, "ERR: --dev name too long\n");
+ goto error;
+ }
+ ifname = (char *)&ifname_buf;
+ strncpy(ifname, optarg, IF_NAMESIZE);
+ ifindex = if_nametoindex(ifname);
+ if (ifindex == 0) {
+ fprintf(stderr,
+ "ERR: --dev name unknown err(%d):%s\n",
+ errno, strerror(errno));
+ goto error;
+ }
+ break;
+ case 's':
+ interval = atoi(optarg);
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'z':
+ use_separators = false;
+ break;
+ case 'a':
+ action_str = (char *)&action_str_buf;
+ strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
+ break;
+ case 'r':
+ cfg_options |= READ_MEM;
+ break;
+ case 'm':
+ cfg_options |= SWAP_MAC;
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ case 'h':
+ error:
+ default:
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ /* Required option */
+ if (ifindex == -1) {
+ fprintf(stderr, "ERR: required option --dev missing\n");
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ cfg.ifindex = ifindex;
+
+ /* Parse action string */
+ if (action_str) {
+ action = parse_xdp_action(action_str);
+ if (action < 0) {
+ fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
+ action_str);
+ list_xdp_actions();
+ return EXIT_FAIL_OPTION;
+ }
+ }
+ cfg.action = action;
+
+ /* XDP_TX requires changing MAC-addrs, else HW may drop */
+ if (action == XDP_TX)
+ cfg_options |= SWAP_MAC;
+ cfg.options = cfg_options;
+
+	/* Trick for pretty printf with thousands separators: use %' */
+ if (use_separators)
+ setlocale(LC_NUMERIC, "en_US");
+
+	/* Userspace sets the ifindex in config_map */
+ err = bpf_map_update_elem(map_fd, &key, &cfg, 0);
+ if (err) {
+ fprintf(stderr, "Store config failed (err:%d)\n", err);
+ exit(EXIT_FAIL_BPF);
+ }
+
+ /* Remove XDP program when program is interrupted or killed */
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
+ fprintf(stderr, "link set xdp fd failed\n");
+ return EXIT_FAIL_XDP;
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
+ stats_poll(interval, action, cfg_options);
+ return EXIT_OK;
+}
diff --git a/samples/bpf/xdp_sample.bpf.c b/samples/bpf/xdp_sample.bpf.c
new file mode 100644
index 000000000..0eb7e1dca
--- /dev/null
+++ b/samples/bpf/xdp_sample.bpf.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/* GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */
+#include "xdp_sample.bpf.h"
+
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+array_map rx_cnt SEC(".maps");
+array_map redir_err_cnt SEC(".maps");
+array_map cpumap_enqueue_cnt SEC(".maps");
+array_map cpumap_kthread_cnt SEC(".maps");
+array_map exception_cnt SEC(".maps");
+array_map devmap_xmit_cnt SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 32 * 32);
+ __type(key, u64);
+ __type(value, struct datarec);
+} devmap_xmit_cnt_multi SEC(".maps");
+
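+/* Set from userspace before load via the skeleton's rodata, so the
+ * verifier sees a constant.
+ */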
+const volatile int nr_cpus = 0;
+
+/* These can be set before loading so that redundant comparisons are
+ * eliminated as dead code by the verifier, and only actual matches are
+ * tried once the tp_btf programs are loaded. This lets the sample filter
+ * tracepoint stats by net_device.
+ */
+const volatile int from_match[32] = {};
+const volatile int to_match[32] = {};
+
+int cpumap_map_id = 0;
+
+/* Check whether b is a member of set a; an empty set matches everything */
+#define IN_SET(a, b) \
+ ({ \
+ bool __res = !(a)[0]; \
+ for (int i = 0; i < ARRAY_SIZE(a) && (a)[i]; i++) { \
+ __res = (a)[i] == (b); \
+ if (__res) \
+ break; \
+ } \
+ __res; \
+ })
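+/* For example, with from_match = {3, 7}, IN_SET(from_match, 7) is true and
+ * IN_SET(from_match, 5) is false; with from_match all zeroes every ifindex
+ * matches, i.e. no filtering is done.
+ */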
+
+static __always_inline __u32 xdp_get_err_key(int err)
+{
+ switch (err) {
+ case 0:
+ return 0;
+ case -EINVAL:
+ return 2;
+ case -ENETDOWN:
+ return 3;
+ case -EMSGSIZE:
+ return 4;
+ case -EOPNOTSUPP:
+ return 5;
+ case -ENOSPC:
+ return 6;
+ default:
+ return 1;
+ }
+}
+
+static __always_inline int xdp_redirect_collect_stat(int from, int err)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 key = XDP_REDIRECT_ERROR;
+ struct datarec *rec;
+ u32 idx;
+
+ if (!IN_SET(from_match, from))
+ return 0;
+
+ key = xdp_get_err_key(err);
+
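+	/* The error counters live in one flat array with nr_cpus slots per
+	 * error key, indexed as [key][cpu].
+	 */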
+ idx = key * nr_cpus + cpu;
+ rec = bpf_map_lookup_elem(&redir_err_cnt, &idx);
+ if (!rec)
+ return 0;
+ if (key)
+ NO_TEAR_INC(rec->dropped);
+ else
+ NO_TEAR_INC(rec->processed);
+	return 0; /* Indicate event was filtered (no further processing) */
+	/*
+	 * Returning 1 here would allow e.g. a perf-record tracepoint
+	 * to see and record these events, but it doesn't work well in
+	 * practice, as stopping perf-record also unloads this bpf_prog.
+	 * Plus, there is the additional overhead of doing so.
+	 */
+}
+
+SEC("tp_btf/xdp_redirect_err")
+int BPF_PROG(tp_xdp_redirect_err, const struct net_device *dev,
+ const struct bpf_prog *xdp, const void *tgt, int err,
+ const struct bpf_map *map, u32 index)
+{
+ return xdp_redirect_collect_stat(dev->ifindex, err);
+}
+
+SEC("tp_btf/xdp_redirect_map_err")
+int BPF_PROG(tp_xdp_redirect_map_err, const struct net_device *dev,
+ const struct bpf_prog *xdp, const void *tgt, int err,
+ const struct bpf_map *map, u32 index)
+{
+ return xdp_redirect_collect_stat(dev->ifindex, err);
+}
+
+SEC("tp_btf/xdp_redirect")
+int BPF_PROG(tp_xdp_redirect, const struct net_device *dev,
+ const struct bpf_prog *xdp, const void *tgt, int err,
+ const struct bpf_map *map, u32 index)
+{
+ return xdp_redirect_collect_stat(dev->ifindex, err);
+}
+
+SEC("tp_btf/xdp_redirect_map")
+int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev,
+ const struct bpf_prog *xdp, const void *tgt, int err,
+ const struct bpf_map *map, u32 index)
+{
+ return xdp_redirect_collect_stat(dev->ifindex, err);
+}
+
+SEC("tp_btf/xdp_cpumap_enqueue")
+int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed,
+ unsigned int drops, int to_cpu)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ struct datarec *rec;
+ u32 idx;
+
+ if (cpumap_map_id && cpumap_map_id != map_id)
+ return 0;
+
+ idx = to_cpu * nr_cpus + cpu;
+ rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &idx);
+ if (!rec)
+ return 0;
+ NO_TEAR_ADD(rec->processed, processed);
+ NO_TEAR_ADD(rec->dropped, drops);
+ /* Record bulk events, then userspace can calc average bulk size */
+ if (processed > 0)
+ NO_TEAR_INC(rec->issue);
+ /* Inception: It's possible to detect overload situations, via
+ * this tracepoint. This can be used for creating a feedback
+ * loop to XDP, which can take appropriate actions to mitigate
+ * this overload situation.
+ */
+ return 0;
+}
+
+SEC("tp_btf/xdp_cpumap_kthread")
+int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
+ unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
+{
+ struct datarec *rec;
+ u32 cpu;
+
+ if (cpumap_map_id && cpumap_map_id != map_id)
+ return 0;
+
+ cpu = bpf_get_smp_processor_id();
+ rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu);
+ if (!rec)
+ return 0;
+ NO_TEAR_ADD(rec->processed, processed);
+ NO_TEAR_ADD(rec->dropped, drops);
+ NO_TEAR_ADD(rec->xdp_pass, xdp_stats->pass);
+ NO_TEAR_ADD(rec->xdp_drop, xdp_stats->drop);
+ NO_TEAR_ADD(rec->xdp_redirect, xdp_stats->redirect);
+ /* Count times kthread yielded CPU via schedule call */
+ if (sched)
+ NO_TEAR_INC(rec->issue);
+ return 0;
+}
+
+SEC("tp_btf/xdp_exception")
+int BPF_PROG(tp_xdp_exception, const struct net_device *dev,
+ const struct bpf_prog *xdp, u32 act)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ struct datarec *rec;
+ u32 key = act, idx;
+
+ if (!IN_SET(from_match, dev->ifindex))
+ return 0;
+ if (!IN_SET(to_match, dev->ifindex))
+ return 0;
+
+ if (key > XDP_REDIRECT)
+ key = XDP_REDIRECT + 1;
+
+ idx = key * nr_cpus + cpu;
+ rec = bpf_map_lookup_elem(&exception_cnt, &idx);
+ if (!rec)
+ return 0;
+ NO_TEAR_INC(rec->dropped);
+
+ return 0;
+}
+
+SEC("tp_btf/xdp_devmap_xmit")
+int BPF_PROG(tp_xdp_devmap_xmit, const struct net_device *from_dev,
+ const struct net_device *to_dev, int sent, int drops, int err)
+{
+ struct datarec *rec;
+ int idx_in, idx_out;
+ u32 cpu;
+
+ idx_in = from_dev->ifindex;
+ idx_out = to_dev->ifindex;
+
+ if (!IN_SET(from_match, idx_in))
+ return 0;
+ if (!IN_SET(to_match, idx_out))
+ return 0;
+
+ cpu = bpf_get_smp_processor_id();
+ rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &cpu);
+ if (!rec)
+ return 0;
+ NO_TEAR_ADD(rec->processed, sent);
+ NO_TEAR_ADD(rec->dropped, drops);
+ /* Record bulk events, then userspace can calc average bulk size */
+ NO_TEAR_INC(rec->info);
+	/* Record error cases, where no frames were sent */
+	/* Also catch the API error case where the driver's ndo_xdp_xmit
+	 * sent more than count (drops would then be negative)
+	 */
+ if (err || drops < 0)
+ NO_TEAR_INC(rec->issue);
+ return 0;
+}
+
+SEC("tp_btf/xdp_devmap_xmit")
+int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device *from_dev,
+ const struct net_device *to_dev, int sent, int drops, int err)
+{
+ struct datarec empty = {};
+ struct datarec *rec;
+ int idx_in, idx_out;
+ u64 idx;
+
+ idx_in = from_dev->ifindex;
+ idx_out = to_dev->ifindex;
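+	/* Pack the (in, out) ifindex pair into a single u64 hash key:
+	 * high 32 bits ingress ifindex, low 32 bits egress ifindex.
+	 */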
+ idx = idx_in;
+ idx = idx << 32 | idx_out;
+
+ if (!IN_SET(from_match, idx_in))
+ return 0;
+ if (!IN_SET(to_match, idx_out))
+ return 0;
+
+ bpf_map_update_elem(&devmap_xmit_cnt_multi, &idx, &empty, BPF_NOEXIST);
+ rec = bpf_map_lookup_elem(&devmap_xmit_cnt_multi, &idx);
+ if (!rec)
+ return 0;
+
+ NO_TEAR_ADD(rec->processed, sent);
+ NO_TEAR_ADD(rec->dropped, drops);
+ NO_TEAR_INC(rec->info);
+ if (err || drops < 0)
+ NO_TEAR_INC(rec->issue);
+ return 0;
+}
diff --git a/samples/bpf/xdp_sample.bpf.h b/samples/bpf/xdp_sample.bpf.h
new file mode 100644
index 000000000..25b1dbe9b
--- /dev/null
+++ b/samples/bpf/xdp_sample.bpf.h
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _XDP_SAMPLE_BPF_H
+#define _XDP_SAMPLE_BPF_H
+
+#include "vmlinux.h"
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+#include "xdp_sample_shared.h"
+
+#define ETH_ALEN 6
+#define ETH_P_802_3_MIN 0x0600
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+#define ETH_P_ARP 0x0806
+#define IPPROTO_ICMPV6 58
+
+#define EINVAL 22
+#define ENETDOWN 100
+#define EMSGSIZE 90
+#define EOPNOTSUPP 95
+#define ENOSPC 28
+
+typedef struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_MMAPABLE);
+ __type(key, unsigned int);
+ __type(value, struct datarec);
+} array_map;
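+/* These arrays are meant to be mmap()ed read-only by the userspace side
+ * (hence BPF_F_MMAPABLE), so stats can be read without syscalls.
+ */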
+
+extern array_map rx_cnt;
+extern const volatile int nr_cpus;
+
+enum {
+ XDP_REDIRECT_SUCCESS = 0,
+ XDP_REDIRECT_ERROR = 1
+};
+
+static __always_inline void swap_src_dst_mac(void *data)
+{
+ unsigned short *p = data;
+ unsigned short dst[3];
+
+ dst[0] = p[0];
+ dst[1] = p[1];
+ dst[2] = p[2];
+ p[0] = p[3];
+ p[1] = p[4];
+ p[2] = p[5];
+ p[3] = dst[0];
+ p[4] = dst[1];
+ p[5] = dst[2];
+}
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define bpf_ntohs(x) __builtin_bswap16(x)
+#define bpf_htons(x) __builtin_bswap16(x)
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define bpf_ntohs(x) (x)
+#define bpf_htons(x) (x)
+#else
+# error "Endianness detection needs to be set up for your compiler?!"
+#endif
+
+/*
+ * Note: including linux/compiler.h or linux/kernel.h for the macros below
+ * conflicts with vmlinux.h include in BPF files, so we define them here.
+ *
+ * Following functions are taken from kernel sources and
+ * break aliasing rules in their original form.
+ *
+ * While kernel is compiled with -fno-strict-aliasing,
+ * perf uses -Wstrict-aliasing=3 which makes build fail
+ * under gcc 4.4.
+ *
+ * Using extra __may_alias__ type to allow aliasing
+ * in this case.
+ */
+typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
+ case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
+ case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
+ case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
+ default:
+ asm volatile ("" : : : "memory");
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ asm volatile ("" : : : "memory");
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
+ case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
+ case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
+ case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
+ default:
+ asm volatile ("" : : : "memory");
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ asm volatile ("" : : : "memory");
+ }
+}
+
+#define READ_ONCE(x) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+#define WRITE_ONCE(x, val) \
+({ \
+ union { typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size(&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+/* Add a value using relaxed read and relaxed write. Less expensive than
+ * fetch_add when there is no write concurrency.
+ */
+#define NO_TEAR_ADD(x, val) WRITE_ONCE((x), READ_ONCE(x) + (val))
+#define NO_TEAR_INC(x) NO_TEAR_ADD((x), 1)
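+/* For example, NO_TEAR_ADD(rec->processed, sent) expands to
+ * WRITE_ONCE(rec->processed, READ_ONCE(rec->processed) + sent), which is
+ * tear-free for concurrent readers and sufficient here because each record
+ * slot is only ever updated from one CPU.
+ */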
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#endif
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
new file mode 100644
index 000000000..9cf76b340
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define SAMPLE_SIZE 64ul
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(u32));
+} my_map SEC(".maps");
+
+SEC("xdp_sample")
+int xdp_sample_prog(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+
+ /* Metadata will be in the perf event before the packet data. */
+ struct S {
+ u16 cookie;
+ u16 pkt_len;
+ } __packed metadata;
+
+ if (data < data_end) {
+ /* The XDP perf_event_output handler will use the upper 32 bits
+ * of the flags argument as a number of bytes to include of the
+ * packet payload in the event data. If the size is too big, the
+ * call to bpf_perf_event_output will fail and return -EFAULT.
+ *
+ * See bpf_xdp_event_output in net/core/filter.c.
+ *
+ * The BPF_F_CURRENT_CPU flag means that the event output fd
+ * will be indexed by the CPU number in the event map.
+ */
+ u64 flags = BPF_F_CURRENT_CPU;
+ u16 sample_size;
+ int ret;
+
+ metadata.cookie = 0xdead;
+ metadata.pkt_len = (u16)(data_end - data);
+ sample_size = min(metadata.pkt_len, SAMPLE_SIZE);
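+		/* e.g. a 1514-byte frame is truncated to SAMPLE_SIZE (64)
+		 * bytes of payload in the emitted event
+		 */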
+ flags |= (u64)sample_size << 32;
+
+ ret = bpf_perf_event_output(ctx, &my_map, flags,
+ &metadata, sizeof(metadata));
+ if (ret)
+ bpf_printk("perf_event_output failed: %d\n", ret);
+ }
+
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
new file mode 100644
index 000000000..7df716323
--- /dev/null
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <net/if.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/ioctl.h>
+#include <signal.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include <libgen.h>
+#include <linux/if_link.h>
+
+#include "perf-sys.h"
+
+static int if_idx;
+static char *if_name;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static __u32 prog_id;
+static struct perf_buffer *pb = NULL;
+
+static int do_attach(int idx, int fd, const char *name)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int err;
+
+ err = bpf_xdp_attach(idx, fd, xdp_flags, NULL);
+ if (err < 0) {
+ printf("ERROR: failed to attach program to %s\n", name);
+ return err;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
+ return err;
+}
+
+static int do_detach(int idx, const char *name)
+{
+ __u32 curr_prog_id = 0;
+ int err = 0;
+
+ err = bpf_xdp_query_id(idx, xdp_flags, &curr_prog_id);
+ if (err) {
+ printf("bpf_xdp_query_id failed\n");
+ return err;
+ }
+ if (prog_id == curr_prog_id) {
+ err = bpf_xdp_detach(idx, xdp_flags, NULL);
+ if (err < 0)
+ printf("ERROR: failed to detach prog from %s\n", name);
+ } else if (!curr_prog_id) {
+		printf("couldn't find a prog id on %s\n", name);
+ } else {
+ printf("program on interface changed, not removing\n");
+ }
+
+ return err;
+}
+
+#define SAMPLE_SIZE 64
+
+static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
+{
+ struct {
+ __u16 cookie;
+ __u16 pkt_len;
+ __u8 pkt_data[SAMPLE_SIZE];
+ } __packed *e = data;
+ int i;
+
+ if (e->cookie != 0xdead) {
+ printf("BUG cookie %x sized %d\n", e->cookie, size);
+ return;
+ }
+
+ printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
+ for (i = 0; i < 14 && i < e->pkt_len; i++)
+ printf("%02x ", e->pkt_data[i]);
+ printf("\n");
+}
+
+static void sig_handler(int signo)
+{
+ do_detach(if_idx, if_name);
+ perf_buffer__free(pb);
+ exit(0);
+}
+
+static void usage(const char *prog)
+{
+ fprintf(stderr,
+ "%s: %s [OPTS] <ifname|ifindex>\n\n"
+ "OPTS:\n"
+ " -F force loading prog\n"
+ " -S use skb-mode\n",
+ __func__, prog);
+}
+
+int main(int argc, char **argv)
+{
+ const char *optstr = "FS";
+ int prog_fd, map_fd, opt;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ char filename[256];
+ int ret, err;
+
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+ switch (opt) {
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ default:
+ usage(basename(argv[0]));
+ return 1;
+ }
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ if (optind == argc) {
+ usage(basename(argv[0]));
+ return 1;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err)
+ return 1;
+
+ prog_fd = bpf_program__fd(prog);
+
+ map = bpf_object__next_map(obj, NULL);
+ if (!map) {
+ printf("finding a map in obj file failed\n");
+ return 1;
+ }
+ map_fd = bpf_map__fd(map);
+
+ if_idx = if_nametoindex(argv[optind]);
+ if (!if_idx)
+ if_idx = strtoul(argv[optind], NULL, 0);
+
+ if (!if_idx) {
+ fprintf(stderr, "Invalid ifname\n");
+ return 1;
+ }
+ if_name = argv[optind];
+ err = do_attach(if_idx, prog_fd, if_name);
+ if (err)
+ return err;
+
+ if (signal(SIGINT, sig_handler) ||
+ signal(SIGHUP, sig_handler) ||
+ signal(SIGTERM, sig_handler)) {
+ perror("signal");
+ return 1;
+ }
+
+ pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL);
+ err = libbpf_get_error(pb);
+ if (err) {
+		fprintf(stderr, "perf_buffer setup failed: %s\n", strerror(-err));
+ return 1;
+ }
+
+ while ((ret = perf_buffer__poll(pb, 1000)) >= 0) {
+ }
+
+ kill(0, SIGINT);
+ return ret;
+}
diff --git a/samples/bpf/xdp_sample_shared.h b/samples/bpf/xdp_sample_shared.h
new file mode 100644
index 000000000..8a7669a5d
--- /dev/null
+++ b/samples/bpf/xdp_sample_shared.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _XDP_SAMPLE_SHARED_H
+#define _XDP_SAMPLE_SHARED_H
+
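+/* Shared between the BPF and userspace halves of each sample. The 64-byte
+ * alignment keeps each per-CPU record on its own cache line, avoiding
+ * false sharing between CPUs updating adjacent records.
+ */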
+struct datarec {
+ size_t processed;
+ size_t dropped;
+ size_t issue;
+ union {
+ size_t xdp_pass;
+ size_t info;
+ };
+ size_t xdp_drop;
+ size_t xdp_redirect;
+} __attribute__((aligned(64)));
+
+#endif
diff --git a/samples/bpf/xdp_sample_user.c b/samples/bpf/xdp_sample_user.c
new file mode 100644
index 000000000..158682852
--- /dev/null
+++ b/samples/bpf/xdp_sample_user.c
@@ -0,0 +1,1673 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <linux/ethtool.h>
+#include <linux/hashtable.h>
+#include <linux/if_link.h>
+#include <linux/jhash.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/sockios.h>
+#include <locale.h>
+#include <math.h>
+#include <net/if.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/signalfd.h>
+#include <sys/sysinfo.h>
+#include <sys/timerfd.h>
+#include <sys/utsname.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "bpf_util.h"
+#include "xdp_sample_user.h"
+
+#define __sample_print(fmt, cond, ...) \
+ ({ \
+ if (cond) \
+ printf(fmt, ##__VA_ARGS__); \
+ })
+
+#define print_always(fmt, ...) __sample_print(fmt, 1, ##__VA_ARGS__)
+#define print_default(fmt, ...) \
+ __sample_print(fmt, sample_log_level & LL_DEFAULT, ##__VA_ARGS__)
+#define __print_err(err, fmt, ...) \
+ ({ \
+ __sample_print(fmt, err > 0 || sample_log_level & LL_DEFAULT, \
+ ##__VA_ARGS__); \
+ sample_err_exp = sample_err_exp ? true : err > 0; \
+ })
+#define print_err(err, fmt, ...) __print_err(err, fmt, ##__VA_ARGS__)
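+/* Illustrative semantics: print_err(drop, fmt, ...) prints whenever
+ * drop > 0 or the default log level is active, and latches
+ * sample_err_exp so simple mode still emits the trailing newline.
+ */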
+
+#define __COLUMN(x) "%'10" x " %-13s"
+#define FMT_COLUMNf __COLUMN(".0f")
+#define FMT_COLUMNd __COLUMN("d")
+#define FMT_COLUMNl __COLUMN("llu")
+#define RX(rx) rx, "rx/s"
+#define PPS(pps) pps, "pkt/s"
+#define DROP(drop) drop, "drop/s"
+#define ERR(err) err, "error/s"
+#define HITS(hits) hits, "hit/s"
+#define XMIT(xmit) xmit, "xmit/s"
+#define PASS(pass) pass, "pass/s"
+#define REDIR(redir) redir, "redir/s"
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+
+#define XDP_UNKNOWN (XDP_REDIRECT + 1)
+#define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
+#define XDP_REDIRECT_ERR_MAX 7
+
+enum map_type {
+ MAP_RX,
+ MAP_REDIRECT_ERR,
+ MAP_CPUMAP_ENQUEUE,
+ MAP_CPUMAP_KTHREAD,
+ MAP_EXCEPTION,
+ MAP_DEVMAP_XMIT,
+ MAP_DEVMAP_XMIT_MULTI,
+ NUM_MAP,
+};
+
+enum log_level {
+ LL_DEFAULT = 1U << 0,
+ LL_SIMPLE = 1U << 1,
+ LL_DEBUG = 1U << 2,
+};
+
+struct record {
+ __u64 timestamp;
+ struct datarec total;
+ struct datarec *cpu;
+};
+
+struct map_entry {
+ struct hlist_node node;
+ __u64 pair;
+ struct record val;
+};
+
+struct stats_record {
+ struct record rx_cnt;
+ struct record redir_err[XDP_REDIRECT_ERR_MAX];
+ struct record kthread;
+ struct record exception[XDP_ACTION_MAX];
+ struct record devmap_xmit;
+ DECLARE_HASHTABLE(xmit_map, 5);
+ struct record enq[];
+};
+
+struct sample_output {
+ struct {
+ __u64 rx;
+ __u64 redir;
+ __u64 drop;
+ __u64 drop_xmit;
+ __u64 err;
+ __u64 xmit;
+ } totals;
+ struct {
+ union {
+ __u64 pps;
+ __u64 num;
+ };
+ __u64 drop;
+ __u64 err;
+ } rx_cnt;
+ struct {
+ __u64 suc;
+ __u64 err;
+ } redir_cnt;
+ struct {
+ __u64 hits;
+ } except_cnt;
+ struct {
+ __u64 pps;
+ __u64 drop;
+ __u64 err;
+ double bavg;
+ } xmit_cnt;
+};
+
+struct xdp_desc {
+ int ifindex;
+ __u32 prog_id;
+ int flags;
+} sample_xdp_progs[32];
+
+struct datarec *sample_mmap[NUM_MAP];
+struct bpf_map *sample_map[NUM_MAP];
+size_t sample_map_count[NUM_MAP];
+enum log_level sample_log_level;
+struct sample_output sample_out;
+unsigned long sample_interval;
+bool sample_err_exp;
+int sample_xdp_cnt;
+int sample_n_cpus;
+int sample_sig_fd;
+int sample_mask;
+
+static const char *xdp_redirect_err_names[XDP_REDIRECT_ERR_MAX] = {
+ /* Index 1 keeps unknown errors */
+ "Success",
+ "Unknown",
+ "EINVAL",
+ "ENETDOWN",
+ "EMSGSIZE",
+ "EOPNOTSUPP",
+ "ENOSPC",
+};
+
+/* Keyed from Unknown */
+static const char *xdp_redirect_err_help[XDP_REDIRECT_ERR_MAX - 1] = {
+ "Unknown error",
+ "Invalid redirection",
+ "Device being redirected to is down",
+ "Packet length too large for device",
+ "Operation not supported",
+ "No space in ptr_ring of cpumap kthread",
+};
+
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+ [XDP_ABORTED] = "XDP_ABORTED",
+ [XDP_DROP] = "XDP_DROP",
+ [XDP_PASS] = "XDP_PASS",
+ [XDP_TX] = "XDP_TX",
+ [XDP_REDIRECT] = "XDP_REDIRECT",
+ [XDP_UNKNOWN] = "XDP_UNKNOWN",
+};
+
+static __u64 gettime(void)
+{
+ struct timespec t;
+ int res;
+
+ res = clock_gettime(CLOCK_MONOTONIC, &t);
+ if (res < 0) {
+ fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
+ return UINT64_MAX;
+ }
+ return (__u64)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+static const char *action2str(int action)
+{
+ if (action < XDP_ACTION_MAX)
+ return xdp_action_names[action];
+ return NULL;
+}
+
+static void sample_print_help(int mask)
+{
+ printf("Output format description\n\n"
+ "By default, redirect success statistics are disabled, use -s to enable.\n"
+ "The terse output mode is default, verbose mode can be activated using -v\n"
+ "Use SIGQUIT (Ctrl + \\) to switch the mode dynamically at runtime\n\n"
+ "Terse mode displays at most the following fields:\n"
+ " rx/s Number of packets received per second\n"
+ " redir/s Number of packets successfully redirected per second\n"
+ " err,drop/s Aggregated count of errors per second (including dropped packets)\n"
+ " xmit/s Number of packets transmitted on the output device per second\n\n"
+ "Output description for verbose mode:\n"
+ " FIELD DESCRIPTION\n");
+
+ if (mask & SAMPLE_RX_CNT) {
+ printf(" receive\t\tDisplays the number of packets received & errors encountered\n"
+ " \t\t\tWhenever an error or packet drop occurs, details of per CPU error\n"
+ " \t\t\tand drop statistics will be expanded inline in terse mode.\n"
+ " \t\t\t\tpkt/s - Packets received per second\n"
+ " \t\t\t\tdrop/s - Packets dropped per second\n"
+ " \t\t\t\terror/s - Errors encountered per second\n\n");
+ }
+ if (mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) {
+ printf(" redirect\t\tDisplays the number of packets successfully redirected\n"
+ " \t\t\tErrors encountered are expanded under redirect_err field\n"
+ " \t\t\tNote that passing -s to enable it has a per packet overhead\n"
+ " \t\t\t\tredir/s - Packets redirected successfully per second\n\n"
+ " redirect_err\t\tDisplays the number of packets that failed redirection\n"
+ " \t\t\tThe errno is expanded under this field with per CPU count\n"
+ " \t\t\tThe recognized errors are:\n");
+
+ for (int i = 2; i < XDP_REDIRECT_ERR_MAX; i++)
+ printf("\t\t\t %s: %s\n", xdp_redirect_err_names[i],
+ xdp_redirect_err_help[i - 1]);
+
+ printf(" \n\t\t\t\terror/s - Packets that failed redirection per second\n\n");
+ }
+
+ if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) {
+ printf(" enqueue to cpu N\tDisplays the number of packets enqueued to bulk queue of CPU N\n"
+ " \t\t\tExpands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N\n"
+ " \t\t\tReceived packets can be associated with the CPU redirect program is enqueuing \n"
+ " \t\t\tpackets to.\n"
+ " \t\t\t\tpkt/s - Packets enqueued per second from other CPU to CPU N\n"
+ " \t\t\t\tdrop/s - Packets dropped when trying to enqueue to CPU N\n"
+ " \t\t\t\tbulk-avg - Average number of packets processed for each event\n\n");
+ }
+
+ if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
+ printf(" kthread\t\tDisplays the number of packets processed in CPUMAP kthread for each CPU\n"
+ " \t\t\tPackets consumed from ptr_ring in kthread, and its xdp_stats (after calling \n"
+ " \t\t\tCPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total and\n"
+ " \t\t\tthen per-CPU to associate it to each CPU's pinned CPUMAP kthread.\n"
+ " \t\t\t\tpkt/s - Packets consumed per second from ptr_ring\n"
+ " \t\t\t\tdrop/s - Packets dropped per second in kthread\n"
+ " \t\t\t\tsched - Number of times kthread called schedule()\n\n"
+ " \t\t\txdp_stats (also expands to per-CPU counts)\n"
+ " \t\t\t\tpass/s - XDP_PASS count for CPUMAP program execution\n"
+ " \t\t\t\tdrop/s - XDP_DROP count for CPUMAP program execution\n"
+ " \t\t\t\tredir/s - XDP_REDIRECT count for CPUMAP program execution\n\n");
+ }
+
+ if (mask & SAMPLE_EXCEPTION_CNT) {
+ printf(" xdp_exception\t\tDisplays xdp_exception tracepoint events\n"
+ " \t\t\tThis can occur due to internal driver errors, unrecognized\n"
+ " \t\t\tXDP actions and due to explicit user trigger by use of XDP_ABORTED\n"
+ " \t\t\tEach action is expanded below this field with its count\n"
+ " \t\t\t\thit/s - Number of times the tracepoint was hit per second\n\n");
+ }
+
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT) {
+ printf(" devmap_xmit\t\tDisplays devmap_xmit tracepoint events\n"
+ " \t\t\tThis tracepoint is invoked for successful transmissions on output\n"
+ " \t\t\tdevice but these statistics are not available for generic XDP mode,\n"
+ " \t\t\thence they will be omitted from the output when using SKB mode\n"
+ " \t\t\t\txmit/s - Number of packets that were transmitted per second\n"
+ " \t\t\t\tdrop/s - Number of packets that failed transmissions per second\n"
+ " \t\t\t\tdrv_err/s - Number of internal driver errors per second\n"
+ " \t\t\t\tbulk-avg - Average number of packets processed for each event\n\n");
+ }
+}
+
+void sample_usage(char *argv[], const struct option *long_options,
+ const char *doc, int mask, bool error)
+{
+ int i;
+
+ if (!error)
+ sample_print_help(mask);
+
+ printf("\n%s\nOption for %s:\n", doc, argv[0]);
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-15s", long_options[i].name);
+ if (long_options[i].flag != NULL)
+ printf(" flag (internal value: %d)",
+ *long_options[i].flag);
+ else
+ printf("\t short-option: -%c", long_options[i].val);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+ unsigned int nr_cpus = libbpf_num_possible_cpus();
+ struct datarec *array;
+
+ array = calloc(nr_cpus, sizeof(*array));
+ if (!array) {
+ fprintf(stderr, "Failed to allocate memory (nr_cpus: %u)\n",
+ nr_cpus);
+ return NULL;
+ }
+ return array;
+}
+
+static int map_entry_init(struct map_entry *e, __u64 pair)
+{
+ e->pair = pair;
+ INIT_HLIST_NODE(&e->node);
+ e->val.timestamp = gettime();
+ e->val.cpu = alloc_record_per_cpu();
+ if (!e->val.cpu)
+ return -ENOMEM;
+ return 0;
+}
+
+static void map_collect_percpu(struct datarec *values, struct record *rec)
+{
+ /* For percpu maps, userspace gets a value per possible CPU */
+ unsigned int nr_cpus = libbpf_num_possible_cpus();
+ __u64 sum_xdp_redirect = 0;
+ __u64 sum_processed = 0;
+ __u64 sum_xdp_pass = 0;
+ __u64 sum_xdp_drop = 0;
+ __u64 sum_dropped = 0;
+ __u64 sum_issue = 0;
+ int i;
+
+ /* Get time as close as possible to reading map contents */
+ rec->timestamp = gettime();
+
+ /* Record and sum values from each CPU */
+ for (i = 0; i < nr_cpus; i++) {
+ rec->cpu[i].processed = READ_ONCE(values[i].processed);
+ rec->cpu[i].dropped = READ_ONCE(values[i].dropped);
+ rec->cpu[i].issue = READ_ONCE(values[i].issue);
+ rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass);
+ rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop);
+ rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect);
+
+ sum_processed += rec->cpu[i].processed;
+ sum_dropped += rec->cpu[i].dropped;
+ sum_issue += rec->cpu[i].issue;
+ sum_xdp_pass += rec->cpu[i].xdp_pass;
+ sum_xdp_drop += rec->cpu[i].xdp_drop;
+ sum_xdp_redirect += rec->cpu[i].xdp_redirect;
+ }
+
+ rec->total.processed = sum_processed;
+ rec->total.dropped = sum_dropped;
+ rec->total.issue = sum_issue;
+ rec->total.xdp_pass = sum_xdp_pass;
+ rec->total.xdp_drop = sum_xdp_drop;
+ rec->total.xdp_redirect = sum_xdp_redirect;
+}
+
+static int map_collect_percpu_devmap(int map_fd, struct stats_record *rec)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ __u32 batch, count = 32;
+ struct datarec *values;
+ bool init = false;
+ __u64 *keys;
+ int i, ret;
+
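+ /* Each key is a (from_ifindex << 32 | to_ifindex) pair, matching the
+ * decoding in stats_get_devmap_xmit_multi().
+ */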
+ keys = calloc(count, sizeof(__u64));
+ if (!keys)
+ return -ENOMEM;
+ values = calloc(count * nr_cpus, sizeof(struct datarec));
+ if (!values) {
+ free(keys);
+ return -ENOMEM;
+ }
+
+ for (;;) {
+ bool exit = false;
+
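+ /* bpf_map_lookup_batch() fails with ENOENT once the final batch has
+ * been fetched; treat that as end-of-map rather than an error.
+ */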
+ ret = bpf_map_lookup_batch(map_fd, init ? &batch : NULL, &batch,
+ keys, values, &count, NULL);
+ if (ret < 0 && errno != ENOENT)
+ break;
+ if (errno == ENOENT)
+ exit = true;
+
+ init = true;
+ for (i = 0; i < count; i++) {
+ struct map_entry *e, *x = NULL;
+ __u64 pair = keys[i];
+ struct datarec *arr;
+
+ arr = &values[i * nr_cpus];
+ hash_for_each_possible(rec->xmit_map, e, node, pair) {
+ if (e->pair == pair) {
+ x = e;
+ break;
+ }
+ }
+ if (!x) {
+ x = calloc(1, sizeof(*x));
+ if (!x)
+ goto cleanup;
+ if (map_entry_init(x, pair) < 0) {
+ free(x);
+ goto cleanup;
+ }
+ hash_add(rec->xmit_map, &x->node, pair);
+ }
+ map_collect_percpu(arr, &x->val);
+ }
+
+ if (exit)
+ break;
+ count = 32;
+ }
+
+ free(values);
+ free(keys);
+ return 0;
+cleanup:
+ free(values);
+ free(keys);
+ return -ENOMEM;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+ struct stats_record *rec;
+ int i;
+
+ rec = calloc(1, sizeof(*rec) + sample_n_cpus * sizeof(struct record));
+ if (!rec) {
+ fprintf(stderr, "Failed to allocate memory\n");
+ return NULL;
+ }
+
+ if (sample_mask & SAMPLE_RX_CNT) {
+ rec->rx_cnt.cpu = alloc_record_per_cpu();
+ if (!rec->rx_cnt.cpu) {
+ fprintf(stderr,
+ "Failed to allocate rx_cnt per-CPU array\n");
+ goto end_rec;
+ }
+ }
+ if (sample_mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) {
+ for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) {
+ rec->redir_err[i].cpu = alloc_record_per_cpu();
+ if (!rec->redir_err[i].cpu) {
+ fprintf(stderr,
+ "Failed to allocate redir_err per-CPU array for "
+ "\"%s\" case\n",
+ xdp_redirect_err_names[i]);
+ while (i--)
+ free(rec->redir_err[i].cpu);
+ goto end_rx_cnt;
+ }
+ }
+ }
+ if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
+ rec->kthread.cpu = alloc_record_per_cpu();
+ if (!rec->kthread.cpu) {
+ fprintf(stderr,
+ "Failed to allocate kthread per-CPU array\n");
+ goto end_redir;
+ }
+ }
+ if (sample_mask & SAMPLE_EXCEPTION_CNT) {
+ for (i = 0; i < XDP_ACTION_MAX; i++) {
+ rec->exception[i].cpu = alloc_record_per_cpu();
+ if (!rec->exception[i].cpu) {
+ fprintf(stderr,
+ "Failed to allocate exception per-CPU array for "
+ "\"%s\" case\n",
+ action2str(i));
+ while (i--)
+ free(rec->exception[i].cpu);
+ goto end_kthread;
+ }
+ }
+ }
+ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) {
+ rec->devmap_xmit.cpu = alloc_record_per_cpu();
+ if (!rec->devmap_xmit.cpu) {
+ fprintf(stderr,
+ "Failed to allocate devmap_xmit per-CPU array\n");
+ goto end_exception;
+ }
+ }
+ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
+ hash_init(rec->xmit_map);
+ if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) {
+ for (i = 0; i < sample_n_cpus; i++) {
+ rec->enq[i].cpu = alloc_record_per_cpu();
+ if (!rec->enq[i].cpu) {
+ fprintf(stderr,
+ "Failed to allocate enqueue per-CPU array for "
+ "CPU %d\n",
+ i);
+ while (i--)
+ free(rec->enq[i].cpu);
+ goto end_devmap_xmit;
+ }
+ }
+ }
+
+ return rec;
+
+end_devmap_xmit:
+ free(rec->devmap_xmit.cpu);
+end_exception:
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ free(rec->exception[i].cpu);
+end_kthread:
+ free(rec->kthread.cpu);
+end_redir:
+ for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++)
+ free(rec->redir_err[i].cpu);
+end_rx_cnt:
+ free(rec->rx_cnt.cpu);
+end_rec:
+ free(rec);
+ return NULL;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+ struct hlist_node *tmp;
+ struct map_entry *e;
+ int i;
+
+ for (i = 0; i < sample_n_cpus; i++)
+ free(r->enq[i].cpu);
+ hash_for_each_safe(r->xmit_map, i, tmp, e, node) {
+ hash_del(&e->node);
+ free(e->val.cpu);
+ free(e);
+ }
+ free(r->devmap_xmit.cpu);
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ free(r->exception[i].cpu);
+ free(r->kthread.cpu);
+ for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++)
+ free(r->redir_err[i].cpu);
+ free(r->rx_cnt.cpu);
+ free(r);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+ double period_ = 0;
+ __u64 period = 0;
+
+ period = r->timestamp - p->timestamp;
+ if (period > 0)
+ period_ = ((double)period / NANOSEC_PER_SEC);
+
+ return period_;
+}
+
+static double sample_round(double val)
+{
+ if (val - floor(val) < 0.5)
+ return floor(val);
+ return ceil(val);
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->processed - p->processed;
+ pps = sample_round(packets / period_);
+ }
+ return pps;
+}
+
+static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->dropped - p->dropped;
+ pps = sample_round(packets / period_);
+ }
+ return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->issue - p->issue;
+ pps = sample_round(packets / period_);
+ }
+ return pps;
+}
+
+static __u64 calc_info_pps(struct datarec *r, struct datarec *p, double period_)
+{
+ __u64 packets = 0;
+ __u64 pps = 0;
+
+ if (period_ > 0) {
+ packets = r->info - p->info;
+ pps = sample_round(packets / period_);
+ }
+ return pps;
+}
+
+static void calc_xdp_pps(struct datarec *r, struct datarec *p, double *xdp_pass,
+ double *xdp_drop, double *xdp_redirect, double period_)
+{
+ *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0;
+ if (period_ > 0) {
+ *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_;
+ *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_;
+ *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_;
+ }
+}
+
+static void stats_get_rx_cnt(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus, struct sample_output *out)
+{
+ struct record *rec, *prev;
+ double t, pps, drop, err;
+ int i;
+
+ rec = &stats_rec->rx_cnt;
+ prev = &stats_prev->rx_cnt;
+ t = calc_period(rec, prev);
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (!pps && !drop && !err)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ "\n",
+ str, PPS(pps), DROP(drop), ERR(err));
+ }
+
+ if (out) {
+ pps = calc_pps(&rec->total, &prev->total, t);
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+
+ out->rx_cnt.pps = pps;
+ out->rx_cnt.drop = drop;
+ out->rx_cnt.err = err;
+ out->totals.rx += pps;
+ out->totals.drop += drop;
+ out->totals.err += err;
+ }
+}
+
+static void stats_get_cpumap_enqueue(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus)
+{
+ struct record *rec, *prev;
+ double t, pps, drop, err;
+ int i, to_cpu;
+
+ /* cpumap enqueue stats */
+ for (to_cpu = 0; to_cpu < sample_n_cpus; to_cpu++) {
+ rec = &stats_rec->enq[to_cpu];
+ prev = &stats_prev->enq[to_cpu];
+ t = calc_period(rec, prev);
+
+ pps = calc_pps(&rec->total, &prev->total, t);
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+
+ if (pps > 0 || drop > 0) {
+ char str[64];
+
+ snprintf(str, sizeof(str), "enqueue to cpu %d", to_cpu);
+
+ if (err > 0)
+ err = pps / err; /* calc average bulk size */
+
+ print_err(drop,
+ " %-20s " FMT_COLUMNf FMT_COLUMNf __COLUMN(
+ ".2f") "\n",
+ str, PPS(pps), DROP(drop), err, "bulk-avg");
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (!pps && !drop && !err)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d->%d", i, to_cpu);
+ if (err > 0)
+ err = pps / err; /* calc average bulk size */
+ print_default(
+ " %-18s " FMT_COLUMNf FMT_COLUMNf __COLUMN(
+ ".2f") "\n",
+ str, PPS(pps), DROP(drop), err, "bulk-avg");
+ }
+ }
+}
+
+static void stats_get_cpumap_remote(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus)
+{
+ double xdp_pass, xdp_drop, xdp_redirect;
+ struct record *rec, *prev;
+ double t;
+ int i;
+
+ rec = &stats_rec->kthread;
+ prev = &stats_prev->kthread;
+ t = calc_period(rec, prev);
+
+ calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop,
+ &xdp_redirect, t);
+ if (xdp_pass || xdp_drop || xdp_redirect) {
+ print_err(xdp_drop,
+ " %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n",
+ "xdp_stats", PASS(xdp_pass), DROP(xdp_drop),
+ REDIR(xdp_redirect));
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ calc_xdp_pps(r, p, &xdp_pass, &xdp_drop, &xdp_redirect, t);
+ if (!xdp_pass && !xdp_drop && !xdp_redirect)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-16s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ "\n",
+ str, PASS(xdp_pass), DROP(xdp_drop),
+ REDIR(xdp_redirect));
+ }
+}
+
+static void stats_get_cpumap_kthread(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus)
+{
+ struct record *rec, *prev;
+ double t, pps, drop, err;
+ int i;
+
+ rec = &stats_rec->kthread;
+ prev = &stats_prev->kthread;
+ t = calc_period(rec, prev);
+
+ pps = calc_pps(&rec->total, &prev->total, t);
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+
+ print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n",
+ pps ? "kthread total" : "kthread", PPS(pps), DROP(drop), err,
+ "sched");
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+ if (!pps && !drop && !err)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ "\n",
+ str, PPS(pps), DROP(drop), err, "sched");
+ }
+}
+
+static void stats_get_redirect_cnt(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus,
+ struct sample_output *out)
+{
+ struct record *rec, *prev;
+ double t, pps;
+ int i;
+
+ rec = &stats_rec->redir_err[0];
+ prev = &stats_prev->redir_err[0];
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ pps = calc_pps(r, p, t);
+ if (!pps)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-18s " FMT_COLUMNf "\n", str, REDIR(pps));
+ }
+
+ if (out) {
+ pps = calc_pps(&rec->total, &prev->total, t);
+ out->redir_cnt.suc = pps;
+ out->totals.redir += pps;
+ }
+}
+
+static void stats_get_redirect_err_cnt(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus,
+ struct sample_output *out)
+{
+ struct record *rec, *prev;
+ double t, drop, sum = 0;
+ int rec_i, i;
+
+ for (rec_i = 1; rec_i < XDP_REDIRECT_ERR_MAX; rec_i++) {
+ char str[64];
+
+ rec = &stats_rec->redir_err[rec_i];
+ prev = &stats_prev->redir_err[rec_i];
+ t = calc_period(rec, prev);
+
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ if (drop > 0 && !out) {
+ snprintf(str, sizeof(str),
+ sample_log_level & LL_DEFAULT ? "%s total" :
+ "%s",
+ xdp_redirect_err_names[rec_i]);
+ print_err(drop, " %-18s " FMT_COLUMNf "\n", str,
+ ERR(drop));
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ double drop;
+
+ drop = calc_drop_pps(r, p, t);
+ if (!drop)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-16s" FMT_COLUMNf "\n", str,
+ ERR(drop));
+ }
+
+ sum += drop;
+ }
+
+ if (out) {
+ out->redir_cnt.err = sum;
+ out->totals.err += sum;
+ }
+}
+
+static void stats_get_exception_cnt(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus,
+ struct sample_output *out)
+{
+ double t, drop, sum = 0;
+ struct record *rec, *prev;
+ int rec_i, i;
+
+ for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
+ rec = &stats_rec->exception[rec_i];
+ prev = &stats_prev->exception[rec_i];
+ t = calc_period(rec, prev);
+
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ /* Fold out errors after heading */
+ sum += drop;
+
+ if (drop > 0 && !out) {
+ print_always(" %-18s " FMT_COLUMNf "\n",
+ action2str(rec_i), ERR(drop));
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+ double drop;
+
+ drop = calc_drop_pps(r, p, t);
+ if (!drop)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ print_default(" %-16s" FMT_COLUMNf "\n",
+ str, ERR(drop));
+ }
+ }
+ }
+
+ if (out) {
+ out->except_cnt.hits = sum;
+ out->totals.err += sum;
+ }
+}
+
+static void stats_get_devmap_xmit(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus,
+ struct sample_output *out)
+{
+ double pps, drop, info, err;
+ struct record *rec, *prev;
+ double t;
+ int i;
+
+ rec = &stats_rec->devmap_xmit;
+ prev = &stats_prev->devmap_xmit;
+ t = calc_period(rec, prev);
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *r = &rec->cpu[i];
+ struct datarec *p = &prev->cpu[i];
+ char str[64];
+
+ pps = calc_pps(r, p, t);
+ drop = calc_drop_pps(r, p, t);
+ err = calc_errs_pps(r, p, t);
+
+ if (!pps && !drop && !err)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ info = calc_info_pps(r, p, t);
+ if (info > 0)
+ info = (pps + drop) / info; /* calc avg bulk */
+ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ __COLUMN(".2f") "\n",
+ str, XMIT(pps), DROP(drop), err, "drv_err/s",
+ info, "bulk-avg");
+ }
+ if (out) {
+ pps = calc_pps(&rec->total, &prev->total, t);
+ drop = calc_drop_pps(&rec->total, &prev->total, t);
+ info = calc_info_pps(&rec->total, &prev->total, t);
+ if (info > 0)
+ info = (pps + drop) / info; /* calc avg bulk */
+ err = calc_errs_pps(&rec->total, &prev->total, t);
+
+ out->xmit_cnt.pps = pps;
+ out->xmit_cnt.drop = drop;
+ out->xmit_cnt.bavg = info;
+ out->xmit_cnt.err = err;
+ out->totals.xmit += pps;
+ out->totals.drop_xmit += drop;
+ out->totals.err += err;
+ }
+}
+
+static void stats_get_devmap_xmit_multi(struct stats_record *stats_rec,
+ struct stats_record *stats_prev,
+ unsigned int nr_cpus,
+ struct sample_output *out,
+ bool xmit_total)
+{
+ double pps, drop, info, err;
+ struct map_entry *entry;
+ struct record *r, *p;
+ double t;
+ int bkt;
+
+ hash_for_each(stats_rec->xmit_map, bkt, entry, node) {
+ struct map_entry *e, *x = NULL;
+ char ifname_from[IFNAMSIZ];
+ char ifname_to[IFNAMSIZ];
+ const char *fstr, *tstr;
+ unsigned long prev_time;
+ struct record beg = {};
+ __u32 from_idx, to_idx;
+ char str[128];
+ __u64 pair;
+ int i;
+
+ prev_time = sample_interval * NANOSEC_PER_SEC;
+
+ pair = entry->pair;
+ from_idx = pair >> 32;
+ to_idx = pair & 0xFFFFFFFF;
+
+ r = &entry->val;
+ beg.timestamp = r->timestamp - prev_time;
+
+ /* Find matching entry from stats_prev map */
+ hash_for_each_possible(stats_prev->xmit_map, e, node, pair) {
+ if (e->pair == pair) {
+ x = e;
+ break;
+ }
+ }
+ if (x)
+ p = &x->val;
+ else
+ p = &beg;
+ t = calc_period(r, p);
+ pps = calc_pps(&r->total, &p->total, t);
+ drop = calc_drop_pps(&r->total, &p->total, t);
+ info = calc_info_pps(&r->total, &p->total, t);
+ if (info > 0)
+ info = (pps + drop) / info; /* calc avg bulk */
+ err = calc_errs_pps(&r->total, &p->total, t);
+
+ if (out) {
+ /* We are responsible for filling out totals */
+ out->totals.xmit += pps;
+ out->totals.drop_xmit += drop;
+ out->totals.err += err;
+ continue;
+ }
+
+ fstr = tstr = NULL;
+ if (if_indextoname(from_idx, ifname_from))
+ fstr = ifname_from;
+ if (if_indextoname(to_idx, ifname_to))
+ tstr = ifname_to;
+
+ snprintf(str, sizeof(str), "xmit %s->%s", fstr ?: "?",
+ tstr ?: "?");
+ /* Skip idle streams of redirection */
+ if (pps || drop || err) {
+ print_err(drop,
+ " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop),
+ err, "drv_err/s", info, "bulk-avg");
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ struct datarec *rc = &r->cpu[i];
+ struct datarec *pc, p_beg = {};
+ char str[64];
+
+ pc = p == &beg ? &p_beg : &p->cpu[i];
+
+ pps = calc_pps(rc, pc, t);
+ drop = calc_drop_pps(rc, pc, t);
+ err = calc_errs_pps(rc, pc, t);
+
+ if (!pps && !drop && !err)
+ continue;
+
+ snprintf(str, sizeof(str), "cpu:%d", i);
+ info = calc_info_pps(rc, pc, t);
+ if (info > 0)
+ info = (pps + drop) / info; /* calc avg bulk */
+
+ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
+ __COLUMN(".2f") "\n", str, XMIT(pps),
+ DROP(drop), err, "drv_err/s", info, "bulk-avg");
+ }
+ }
+}
+
+static void stats_print(const char *prefix, int mask, struct stats_record *r,
+ struct stats_record *p, struct sample_output *out)
+{
+ int nr_cpus = libbpf_num_possible_cpus();
+ const char *str;
+
+ print_always("%-23s", prefix ?: "Summary");
+ if (mask & SAMPLE_RX_CNT)
+ print_always(FMT_COLUMNl, RX(out->totals.rx));
+ if (mask & SAMPLE_REDIRECT_CNT)
+ print_always(FMT_COLUMNl, REDIR(out->totals.redir));
+ printf(FMT_COLUMNl,
+ out->totals.err + out->totals.drop + out->totals.drop_xmit,
+ "err,drop/s");
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT ||
+ mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
+ printf(FMT_COLUMNl, XMIT(out->totals.xmit));
+ printf("\n");
+
+ if (mask & SAMPLE_RX_CNT) {
+ str = (sample_log_level & LL_DEFAULT) && out->rx_cnt.pps ?
+ "receive total" :
+ "receive";
+ print_err((out->rx_cnt.err || out->rx_cnt.drop),
+ " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl "\n",
+ str, PPS(out->rx_cnt.pps), DROP(out->rx_cnt.drop),
+ ERR(out->rx_cnt.err));
+
+ stats_get_rx_cnt(r, p, nr_cpus, NULL);
+ }
+
+ if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT)
+ stats_get_cpumap_enqueue(r, p, nr_cpus);
+
+ if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
+ stats_get_cpumap_kthread(r, p, nr_cpus);
+ stats_get_cpumap_remote(r, p, nr_cpus);
+ }
+
+ if (mask & SAMPLE_REDIRECT_CNT) {
+ str = out->redir_cnt.suc ? "redirect total" : "redirect";
+ print_default(" %-20s " FMT_COLUMNl "\n", str,
+ REDIR(out->redir_cnt.suc));
+
+ stats_get_redirect_cnt(r, p, nr_cpus, NULL);
+ }
+
+ if (mask & SAMPLE_REDIRECT_ERR_CNT) {
+ str = (sample_log_level & LL_DEFAULT) && out->redir_cnt.err ?
+ "redirect_err total" :
+ "redirect_err";
+ print_err(out->redir_cnt.err, " %-20s " FMT_COLUMNl "\n", str,
+ ERR(out->redir_cnt.err));
+
+ stats_get_redirect_err_cnt(r, p, nr_cpus, NULL);
+ }
+
+ if (mask & SAMPLE_EXCEPTION_CNT) {
+ str = out->except_cnt.hits ? "xdp_exception total" :
+ "xdp_exception";
+
+ print_err(out->except_cnt.hits, " %-20s " FMT_COLUMNl "\n", str,
+ HITS(out->except_cnt.hits));
+
+ stats_get_exception_cnt(r, p, nr_cpus, NULL);
+ }
+
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT) {
+ str = (sample_log_level & LL_DEFAULT) && out->xmit_cnt.pps ?
+ "devmap_xmit total" :
+ "devmap_xmit";
+
+ print_err(out->xmit_cnt.err || out->xmit_cnt.drop,
+ " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl
+ __COLUMN(".2f") "\n",
+ str, XMIT(out->xmit_cnt.pps),
+ DROP(out->xmit_cnt.drop), out->xmit_cnt.err,
+ "drv_err/s", out->xmit_cnt.bavg, "bulk-avg");
+
+ stats_get_devmap_xmit(r, p, nr_cpus, NULL);
+ }
+
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
+ stats_get_devmap_xmit_multi(r, p, nr_cpus, NULL,
+ mask & SAMPLE_DEVMAP_XMIT_CNT);
+
+ if (sample_log_level & LL_DEFAULT ||
+ ((sample_log_level & LL_SIMPLE) && sample_err_exp)) {
+ sample_err_exp = false;
+ printf("\n");
+ }
+}
+
+int sample_setup_maps(struct bpf_map **maps)
+{
+ sample_n_cpus = libbpf_num_possible_cpus();
+
+ for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) {
+ sample_map[i] = maps[i];
+
+ switch (i) {
+ case MAP_RX:
+ case MAP_CPUMAP_KTHREAD:
+ case MAP_DEVMAP_XMIT:
+ sample_map_count[i] = sample_n_cpus;
+ break;
+ case MAP_REDIRECT_ERR:
+ sample_map_count[i] =
+ XDP_REDIRECT_ERR_MAX * sample_n_cpus;
+ break;
+ case MAP_EXCEPTION:
+ sample_map_count[i] = XDP_ACTION_MAX * sample_n_cpus;
+ break;
+ case MAP_CPUMAP_ENQUEUE:
+ sample_map_count[i] = sample_n_cpus * sample_n_cpus;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (bpf_map__set_max_entries(sample_map[i], sample_map_count[i]) < 0)
+ return -errno;
+ }
+ sample_map[MAP_DEVMAP_XMIT_MULTI] = maps[MAP_DEVMAP_XMIT_MULTI];
+ return 0;
+}
+
+static int sample_setup_maps_mappings(void)
+{
+ for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) {
+ size_t size = sample_map_count[i] * sizeof(struct datarec);
+
+ sample_mmap[i] = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bpf_map__fd(sample_map[i]), 0);
+ if (sample_mmap[i] == MAP_FAILED)
+ return -errno;
+ }
+ return 0;
+}
+
+int __sample_init(int mask)
+{
+ sigset_t st;
+
+ sigemptyset(&st);
+ sigaddset(&st, SIGQUIT);
+ sigaddset(&st, SIGINT);
+ sigaddset(&st, SIGTERM);
+
+ if (sigprocmask(SIG_BLOCK, &st, NULL) < 0)
+ return -errno;
+
+ sample_sig_fd = signalfd(-1, &st, SFD_CLOEXEC | SFD_NONBLOCK);
+ if (sample_sig_fd < 0)
+ return -errno;
+
+ sample_mask = mask;
+
+ return sample_setup_maps_mappings();
+}
+
+static int __sample_remove_xdp(int ifindex, __u32 prog_id, int xdp_flags)
+{
+ __u32 cur_prog_id = 0;
+ int ret;
+
+ if (prog_id) {
+ ret = bpf_xdp_query_id(ifindex, xdp_flags, &cur_prog_id);
+ if (ret < 0)
+ return -errno;
+
+ if (prog_id != cur_prog_id) {
+ print_always(
+ "Program on ifindex %d does not match installed "
+ "program, skipping unload\n",
+ ifindex);
+ return -ENOENT;
+ }
+ }
+
+ return bpf_xdp_detach(ifindex, xdp_flags, NULL);
+}
+
+int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,
+ bool force)
+{
+ int ret, xdp_flags = 0;
+ __u32 prog_id = 0;
+
+ if (sample_xdp_cnt == 32) {
+ fprintf(stderr,
+ "Total limit for installed XDP programs in a sample reached\n");
+ return -ENOTSUP;
+ }
+
+ xdp_flags |= !force ? XDP_FLAGS_UPDATE_IF_NOEXIST : 0;
+ xdp_flags |= generic ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
+ ret = bpf_xdp_attach(ifindex, bpf_program__fd(xdp_prog), xdp_flags, NULL);
+ if (ret < 0) {
+ ret = -errno;
+ fprintf(stderr,
+ "Failed to install program \"%s\" on ifindex %d, mode = %s, "
+ "force = %s: %s\n",
+ bpf_program__name(xdp_prog), ifindex,
+ generic ? "skb" : "native", force ? "true" : "false",
+ strerror(-ret));
+ return ret;
+ }
+
+ ret = bpf_xdp_query_id(ifindex, xdp_flags, &prog_id);
+ if (ret < 0) {
+ ret = -errno;
+ fprintf(stderr,
+ "Failed to get XDP program id for ifindex %d, removing program: %s\n",
+ ifindex, strerror(errno));
+ __sample_remove_xdp(ifindex, 0, xdp_flags);
+ return ret;
+ }
+ sample_xdp_progs[sample_xdp_cnt++] =
+ (struct xdp_desc){ ifindex, prog_id, xdp_flags };
+
+ return 0;
+}
+
+static void sample_summary_print(void)
+{
+ double num = sample_out.rx_cnt.num;
+
+ if (sample_out.totals.rx) {
+ double pkts = sample_out.totals.rx;
+
+ print_always(" Packets received : %'-10llu\n",
+ sample_out.totals.rx);
+ print_always(" Average packets/s : %'-10.0f\n",
+ sample_round(pkts / num));
+ }
+ if (sample_out.totals.redir) {
+ double pkts = sample_out.totals.redir;
+
+ print_always(" Packets redirected : %'-10llu\n",
+ sample_out.totals.redir);
+ print_always(" Average redir/s : %'-10.0f\n",
+ sample_round(pkts / num));
+ }
+ if (sample_out.totals.drop)
+ print_always(" Rx dropped : %'-10llu\n",
+ sample_out.totals.drop);
+ if (sample_out.totals.drop_xmit)
+ print_always(" Tx dropped : %'-10llu\n",
+ sample_out.totals.drop_xmit);
+ if (sample_out.totals.err)
+ print_always(" Errors recorded : %'-10llu\n",
+ sample_out.totals.err);
+ if (sample_out.totals.xmit) {
+ double pkts = sample_out.totals.xmit;
+
+ print_always(" Packets transmitted : %'-10llu\n",
+ sample_out.totals.xmit);
+ print_always(" Average transmit/s : %'-10.0f\n",
+ sample_round(pkts / num));
+ }
+}
+
+void sample_exit(int status)
+{
+ size_t size;
+
+ for (int i = 0; i < NUM_MAP; i++) {
+ size = sample_map_count[i] * sizeof(**sample_mmap);
+ munmap(sample_mmap[i], size);
+ }
+ while (sample_xdp_cnt--) {
+ int i = sample_xdp_cnt, ifindex, xdp_flags;
+ __u32 prog_id;
+
+ prog_id = sample_xdp_progs[i].prog_id;
+ ifindex = sample_xdp_progs[i].ifindex;
+ xdp_flags = sample_xdp_progs[i].flags;
+
+ __sample_remove_xdp(ifindex, prog_id, xdp_flags);
+ }
+ sample_summary_print();
+ close(sample_sig_fd);
+ exit(status);
+}
+
+static int sample_stats_collect(struct stats_record *rec)
+{
+ int i;
+
+ if (sample_mask & SAMPLE_RX_CNT)
+ map_collect_percpu(sample_mmap[MAP_RX], &rec->rx_cnt);
+
+ if (sample_mask & SAMPLE_REDIRECT_CNT)
+ map_collect_percpu(sample_mmap[MAP_REDIRECT_ERR], &rec->redir_err[0]);
+
+ if (sample_mask & SAMPLE_REDIRECT_ERR_CNT) {
+ for (i = 1; i < XDP_REDIRECT_ERR_MAX; i++)
+ map_collect_percpu(&sample_mmap[MAP_REDIRECT_ERR][i * sample_n_cpus],
+ &rec->redir_err[i]);
+ }
+
+ if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT)
+ for (i = 0; i < sample_n_cpus; i++)
+ map_collect_percpu(&sample_mmap[MAP_CPUMAP_ENQUEUE][i * sample_n_cpus],
+ &rec->enq[i]);
+
+ if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT)
+ map_collect_percpu(sample_mmap[MAP_CPUMAP_KTHREAD],
+ &rec->kthread);
+
+ if (sample_mask & SAMPLE_EXCEPTION_CNT)
+ for (i = 0; i < XDP_ACTION_MAX; i++)
+ map_collect_percpu(&sample_mmap[MAP_EXCEPTION][i * sample_n_cpus],
+ &rec->exception[i]);
+
+ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT)
+ map_collect_percpu(sample_mmap[MAP_DEVMAP_XMIT], &rec->devmap_xmit);
+
+ if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) {
+ if (map_collect_percpu_devmap(bpf_map__fd(sample_map[MAP_DEVMAP_XMIT_MULTI]), rec) < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sample_summary_update(struct sample_output *out)
+{
+ sample_out.totals.rx += out->totals.rx;
+ sample_out.totals.redir += out->totals.redir;
+ sample_out.totals.drop += out->totals.drop;
+ sample_out.totals.drop_xmit += out->totals.drop_xmit;
+ sample_out.totals.err += out->totals.err;
+ sample_out.totals.xmit += out->totals.xmit;
+ sample_out.rx_cnt.num++;
+}
+
+static void sample_stats_print(int mask, struct stats_record *cur,
+ struct stats_record *prev, char *prog_name)
+{
+ struct sample_output out = {};
+
+ if (mask & SAMPLE_RX_CNT)
+ stats_get_rx_cnt(cur, prev, 0, &out);
+ if (mask & SAMPLE_REDIRECT_CNT)
+ stats_get_redirect_cnt(cur, prev, 0, &out);
+ if (mask & SAMPLE_REDIRECT_ERR_CNT)
+ stats_get_redirect_err_cnt(cur, prev, 0, &out);
+ if (mask & SAMPLE_EXCEPTION_CNT)
+ stats_get_exception_cnt(cur, prev, 0, &out);
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT)
+ stats_get_devmap_xmit(cur, prev, 0, &out);
+ else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
+ stats_get_devmap_xmit_multi(cur, prev, 0, &out,
+ mask & SAMPLE_DEVMAP_XMIT_CNT);
+ sample_summary_update(&out);
+
+ stats_print(prog_name, mask, cur, prev, &out);
+}
+
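+/* Toggle between the default (verbose) and simple output modes by
+ * flipping every log-level bit below LL_DEBUG.
+ */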
+void sample_switch_mode(void)
+{
+ sample_log_level ^= LL_DEBUG - 1;
+}
+
+static int sample_signal_cb(void)
+{
+ struct signalfd_siginfo si;
+ int r;
+
+ r = read(sample_sig_fd, &si, sizeof(si));
+ if (r < 0)
+ return -errno;
+
+ switch (si.ssi_signo) {
+ case SIGQUIT:
+ sample_switch_mode();
+ printf("\n");
+ break;
+ default:
+ printf("\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Pointer swap trick */
+static void swap(struct stats_record **a, struct stats_record **b)
+{
+ struct stats_record *tmp;
+
+ tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+static int sample_timer_cb(int timerfd, struct stats_record **rec,
+ struct stats_record **prev)
+{
+ char line[64] = "Summary";
+ int ret;
+ __u64 t;
+
+ ret = read(timerfd, &t, sizeof(t));
+ if (ret < 0)
+ return -errno;
+
+ swap(prev, rec);
+ ret = sample_stats_collect(*rec);
+ if (ret < 0)
+ return ret;
+
+ if (sample_xdp_cnt == 2 && !(sample_mask & SAMPLE_SKIP_HEADING)) {
+ char fi[IFNAMSIZ];
+ char to[IFNAMSIZ];
+ const char *f, *t;
+
+ f = t = NULL;
+ if (if_indextoname(sample_xdp_progs[0].ifindex, fi))
+ f = fi;
+ if (if_indextoname(sample_xdp_progs[1].ifindex, to))
+ t = to;
+
+ snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
+ }
+
+ sample_stats_print(sample_mask, *rec, *prev, line);
+ return 0;
+}
+
+int sample_run(int interval, void (*post_cb)(void *), void *ctx)
+{
+ struct timespec ts = { interval, 0 };
+ struct itimerspec its = { ts, ts };
+ struct stats_record *rec, *prev;
+ struct pollfd pfd[2] = {};
+ int timerfd, ret;
+
+ if (!interval) {
+ fprintf(stderr, "Incorrect interval 0\n");
+ return -EINVAL;
+ }
+ sample_interval = interval;
+ /* Pretty print numbers */
+ setlocale(LC_NUMERIC, "en_US.UTF-8");
+
+ timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
+ if (timerfd < 0)
+ return -errno;
+ timerfd_settime(timerfd, 0, &its, NULL);
+
+ pfd[0].fd = sample_sig_fd;
+ pfd[0].events = POLLIN;
+
+ pfd[1].fd = timerfd;
+ pfd[1].events = POLLIN;
+
+ ret = -ENOMEM;
+ rec = alloc_stats_record();
+ if (!rec)
+ goto end;
+ prev = alloc_stats_record();
+ if (!prev)
+ goto end_rec;
+
+ ret = sample_stats_collect(rec);
+ if (ret < 0)
+ goto end_rec_prev;
+
+ for (;;) {
+ ret = poll(pfd, 2, -1);
+ if (ret < 0) {
+ if (errno == EINTR)
+ continue;
+ else
+ break;
+ }
+
+ if (pfd[0].revents & POLLIN)
+ ret = sample_signal_cb();
+ else if (pfd[1].revents & POLLIN)
+ ret = sample_timer_cb(timerfd, &rec, &prev);
+
+ if (ret)
+ break;
+
+ if (post_cb)
+ post_cb(ctx);
+ }
+
+end_rec_prev:
+ free_stats_record(prev);
+end_rec:
+ free_stats_record(rec);
+end:
+ close(timerfd);
+
+ return ret;
+}
+
+const char *get_driver_name(int ifindex)
+{
+ struct ethtool_drvinfo drv = {};
+ char ifname[IF_NAMESIZE];
+ static char drvname[32];
+ struct ifreq ifr = {};
+ int fd, r = 0;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return "[error]";
+
+ if (!if_indextoname(ifindex, ifname))
+ goto end;
+
+ drv.cmd = ETHTOOL_GDRVINFO;
+ safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+ ifr.ifr_data = (void *)&drv;
+
+ r = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (r)
+ goto end;
+
+ safe_strncpy(drvname, drv.driver, sizeof(drvname));
+
+ close(fd);
+ return drvname;
+
+end:
+ r = errno;
+ close(fd);
+ return r == EOPNOTSUPP ? "loopback" : "[error]";
+}
+
+int get_mac_addr(int ifindex, void *mac_addr)
+{
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr = {};
+ int fd, r;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return -errno;
+
+ if (!if_indextoname(ifindex, ifname)) {
+ r = -errno;
+ goto end;
+ }
+
+ safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ r = ioctl(fd, SIOCGIFHWADDR, &ifr);
+ if (r) {
+ r = -errno;
+ goto end;
+ }
+
+ memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));
+
+end:
+ close(fd);
+ return r;
+}
+
+__attribute__((constructor)) static void sample_ctor(void)
+{
+ if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) {
+ fprintf(stderr, "Failed to set libbpf strict mode: %s\n",
+ strerror(errno));
+ /* Just exit, nothing to cleanup right now */
+ exit(EXIT_FAIL_BPF);
+ }
+}
diff --git a/samples/bpf/xdp_sample_user.h b/samples/bpf/xdp_sample_user.h
new file mode 100644
index 000000000..f45051679
--- /dev/null
+++ b/samples/bpf/xdp_sample_user.h
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef XDP_SAMPLE_USER_H
+#define XDP_SAMPLE_USER_H
+
+#include <bpf/libbpf.h>
+#include <linux/compiler.h>
+
+#include "xdp_sample_shared.h"
+
+enum stats_mask {
+ _SAMPLE_REDIRECT_MAP = 1U << 0,
+ SAMPLE_RX_CNT = 1U << 1,
+ SAMPLE_REDIRECT_ERR_CNT = 1U << 2,
+ SAMPLE_CPUMAP_ENQUEUE_CNT = 1U << 3,
+ SAMPLE_CPUMAP_KTHREAD_CNT = 1U << 4,
+ SAMPLE_EXCEPTION_CNT = 1U << 5,
+ SAMPLE_DEVMAP_XMIT_CNT = 1U << 6,
+ SAMPLE_REDIRECT_CNT = 1U << 7,
+ SAMPLE_REDIRECT_MAP_CNT = SAMPLE_REDIRECT_CNT | _SAMPLE_REDIRECT_MAP,
+ SAMPLE_REDIRECT_ERR_MAP_CNT = SAMPLE_REDIRECT_ERR_CNT | _SAMPLE_REDIRECT_MAP,
+ SAMPLE_DEVMAP_XMIT_CNT_MULTI = 1U << 8,
+ SAMPLE_SKIP_HEADING = 1U << 9,
+};
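+/* _SAMPLE_REDIRECT_MAP is an internal modifier bit; the *_MAP_CNT
+ * variants select the map-flavoured redirect tracepoints
+ * (tp_xdp_redirect_map*) in DEFINE_SAMPLE_INIT() below.
+ */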
+
+/* Exit return codes */
+#define EXIT_OK 0
+#define EXIT_FAIL 1
+#define EXIT_FAIL_OPTION 2
+#define EXIT_FAIL_XDP 3
+#define EXIT_FAIL_BPF 4
+#define EXIT_FAIL_MEM 5
+
+int sample_setup_maps(struct bpf_map **maps);
+int __sample_init(int mask);
+void sample_exit(int status);
+int sample_run(int interval, void (*post_cb)(void *), void *ctx);
+
+void sample_switch_mode(void);
+int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,
+ bool force);
+void sample_usage(char *argv[], const struct option *long_options,
+ const char *doc, int mask, bool error);
+
+const char *get_driver_name(int ifindex);
+int get_mac_addr(int ifindex, void *mac_addr);
+
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wstringop-truncation"
+#endif
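+/* Like strncpy(), but guarantees that dst is NUL-terminated. */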
+__attribute__((unused))
+static inline char *safe_strncpy(char *dst, const char *src, size_t size)
+{
+ if (!size)
+ return dst;
+ strncpy(dst, src, size - 1);
+ dst[size - 1] = '\0';
+ return dst;
+}
+#pragma GCC diagnostic pop
+
+#define __attach_tp(name) \
+ ({ \
+ if (bpf_program__type(skel->progs.name) != BPF_PROG_TYPE_TRACING)\
+ return -EINVAL; \
+ skel->links.name = bpf_program__attach(skel->progs.name); \
+ if (!skel->links.name) \
+ return -errno; \
+ })
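+/* Note: __attach_tp() expands to statements that return on failure, so
+ * it is only usable inside DEFINE_SAMPLE_INIT(), where a skel pointer
+ * is in scope and the enclosing function returns int.
+ */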
+
+#define sample_init_pre_load(skel) \
+ ({ \
+ skel->rodata->nr_cpus = libbpf_num_possible_cpus(); \
+ sample_setup_maps((struct bpf_map *[]){ \
+ skel->maps.rx_cnt, skel->maps.redir_err_cnt, \
+ skel->maps.cpumap_enqueue_cnt, \
+ skel->maps.cpumap_kthread_cnt, \
+ skel->maps.exception_cnt, skel->maps.devmap_xmit_cnt, \
+ skel->maps.devmap_xmit_cnt_multi }); \
+ })
+
+#define DEFINE_SAMPLE_INIT(name) \
+ static int sample_init(struct name *skel, int mask) \
+ { \
+ int ret; \
+ ret = __sample_init(mask); \
+ if (ret < 0) \
+ return ret; \
+ if (mask & SAMPLE_REDIRECT_MAP_CNT) \
+ __attach_tp(tp_xdp_redirect_map); \
+ if (mask & SAMPLE_REDIRECT_CNT) \
+ __attach_tp(tp_xdp_redirect); \
+ if (mask & SAMPLE_REDIRECT_ERR_MAP_CNT) \
+ __attach_tp(tp_xdp_redirect_map_err); \
+ if (mask & SAMPLE_REDIRECT_ERR_CNT) \
+ __attach_tp(tp_xdp_redirect_err); \
+ if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) \
+ __attach_tp(tp_xdp_cpumap_enqueue); \
+ if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) \
+ __attach_tp(tp_xdp_cpumap_kthread); \
+ if (mask & SAMPLE_EXCEPTION_CNT) \
+ __attach_tp(tp_xdp_exception); \
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT) \
+ __attach_tp(tp_xdp_devmap_xmit); \
+ if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) \
+ __attach_tp(tp_xdp_devmap_xmit_multi); \
+ return 0; \
+ }
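+/*
+ * Usage sketch (assumes a libbpf skeleton named xdp_redirect_map whose
+ * progs include the tracepoint programs referenced above):
+ *
+ *	DEFINE_SAMPLE_INIT(xdp_redirect_map);
+ *
+ *	skel = xdp_redirect_map__open();
+ *	sample_init_pre_load(skel);
+ *	xdp_redirect_map__load(skel);
+ *	sample_init(skel, mask);
+ *	sample_run(interval, NULL, NULL);
+ */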
+
+#endif
diff --git a/samples/bpf/xdp_tx_iptunnel_common.h b/samples/bpf/xdp_tx_iptunnel_common.h
new file mode 100644
index 000000000..be839892c
--- /dev/null
+++ b/samples/bpf/xdp_tx_iptunnel_common.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016 Facebook
+ */
+#ifndef _SAMPLES_BPF_XDP_TX_IPTNL_COMMON_H
+#define _SAMPLES_BPF_XDP_TX_IPTNL_COMMON_H
+
+#include <linux/types.h>
+
+#define MAX_IPTNL_ENTRIES 256U
+
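+/* Lookup key for the vip2tnl map defined in xdp_tx_iptunnel_kern.c:
+ * packets whose (daddr, dport, family, protocol) tuple matches an entry
+ * are encapsulated.
+ */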
+struct vip {
+ union {
+ __u32 v6[4];
+ __u32 v4;
+ } daddr;
+ __u16 dport;
+ __u16 family;
+ __u8 protocol;
+};
+
+struct iptnl_info {
+ union {
+ __u32 v6[4];
+ __u32 v4;
+ } saddr;
+ union {
+ __u32 v6[4];
+ __u32 v4;
+ } daddr;
+ __u16 family;
+ __u8 dmac[6];
+};
+
+#endif
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
new file mode 100644
index 000000000..0e2bca3a3
--- /dev/null
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program shows how to use bpf_xdp_adjust_head() by
+ * encapsulating the incoming packet in an IPv4/v6 header
+ * and then XDP_TX it out.
+ */
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <bpf/bpf_helpers.h>
+#include "xdp_tx_iptunnel_common.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 256);
+} rxcnt SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct vip);
+ __type(value, struct iptnl_info);
+ __uint(max_entries, MAX_IPTNL_ENTRIES);
+} vip2tnl SEC(".maps");
+
+static __always_inline void count_tx(u32 protocol)
+{
+ u64 *rxcnt_count;
+
+ rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+ if (rxcnt_count)
+ *rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, void *data_end,
+ u8 protocol)
+{
+ struct tcphdr *th;
+ struct udphdr *uh;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ th = (struct tcphdr *)trans_data;
+ if (th + 1 > data_end)
+ return -1;
+ return th->dest;
+ case IPPROTO_UDP:
+ uh = (struct udphdr *)trans_data;
+ if (uh + 1 > data_end)
+ return -1;
+ return uh->dest;
+ default:
+ return 0;
+ }
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+ const struct ethhdr *old_eth,
+ const struct iptnl_info *tnl,
+ __be16 h_proto)
+{
+ memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+ memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+ new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct iphdr *iph = data + sizeof(struct ethhdr);
+ u16 *next_iph_u16;
+ u16 payload_len;
+ struct vip vip = {};
+ int dport;
+ u32 csum = 0;
+ int i;
+
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+
+ dport = get_dport(iph + 1, data_end, iph->protocol);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = iph->protocol;
+ vip.family = AF_INET;
+ vip.daddr.v4 = iph->daddr;
+ vip.dport = dport;
+ payload_len = ntohs(iph->tot_len);
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v4-in-v4 */
+ if (!tnl || tnl->family != AF_INET)
+ return XDP_PASS;
+
+ /* The vip key is found. Add an IP header and send it out */
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+ return XDP_DROP;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+
+ new_eth = data;
+ iph = data + sizeof(*new_eth);
+ old_eth = data + sizeof(*iph);
+
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end ||
+ iph + 1 > data_end)
+ return XDP_DROP;
+
+ set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IP));
+
+ iph->version = 4;
+ iph->ihl = sizeof(*iph) >> 2;
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_IPIP;
+ iph->check = 0;
+ iph->tos = 0;
+ iph->tot_len = htons(payload_len + sizeof(*iph));
+ iph->daddr = tnl->daddr.v4;
+ iph->saddr = tnl->saddr.v4;
+ iph->ttl = 8;
+
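+ /* IPv4 header checksum: one's-complement sum of the header's 16-bit
+ * words, folded and inverted below (cf. RFC 1071).
+ */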
+ next_iph_u16 = (u16 *)iph;
+#pragma clang loop unroll(full)
+ for (i = 0; i < sizeof(*iph) >> 1; i++)
+ csum += *next_iph_u16++;
+
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+ __u16 payload_len;
+ struct vip vip = {};
+ int dport;
+
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+
+ dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = ip6h->nexthdr;
+ vip.family = AF_INET6;
+ memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+ vip.dport = dport;
+ payload_len = ip6h->payload_len;
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v6-in-v6 */
+ if (!tnl || tnl->family != AF_INET6)
+ return XDP_PASS;
+
+ /* The vip key is found. Add an IP header and send it out */
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+ return XDP_DROP;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+
+ new_eth = data;
+ ip6h = data + sizeof(*new_eth);
+ old_eth = data + sizeof(*ip6h);
+
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end ||
+ ip6h + 1 > data_end)
+ return XDP_DROP;
+
+ set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IPV6));
+
+ ip6h->version = 6;
+ ip6h->priority = 0;
+ memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+ ip6h->payload_len = htons(ntohs(payload_len) + sizeof(*ip6h));
+ ip6h->nexthdr = IPPROTO_IPV6;
+ ip6h->hop_limit = 8;
+ memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+ memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+SEC("xdp.frags")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ __u16 h_proto;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ h_proto = eth->h_proto;
+
+ if (h_proto == htons(ETH_P_IP))
+ return handle_ipv4(xdp);
+ else if (h_proto == htons(ETH_P_IPV6))
+ return handle_ipv6(xdp);
+ else
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c
new file mode 100644
index 000000000..307baef68
--- /dev/null
+++ b/samples/bpf/xdp_tx_iptunnel_user.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Facebook
+ */
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <netinet/ether.h>
+#include <unistd.h>
+#include <time.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+#include "xdp_tx_iptunnel_common.h"
+
+#define STATS_INTERVAL_S 2U
+
+static int ifindex = -1;
+static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+static int rxcnt_map_fd;
+static __u32 prog_id;
+
+static void int_exit(int sig)
+{
+ __u32 curr_prog_id = 0;
+
+ if (ifindex > -1) {
+ if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
+ printf("bpf_xdp_query_id failed\n");
+ exit(1);
+ }
+ if (prog_id == curr_prog_id)
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
+ else if (!curr_prog_id)
+ printf("couldn't find a prog id on a given iface\n");
+ else
+ printf("program on interface changed, not removing\n");
+ }
+ exit(0);
+}
+
+/* simple per-protocol packet counter: polls the rxcnt map that the
+ * kernel-side program bumps in count_tx()
+ */
+static void poll_stats(unsigned int kill_after_s)
+{
+ const unsigned int nr_protos = 256;
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ time_t started_at = time(NULL);
+ __u64 values[nr_cpus], prev[nr_protos][nr_cpus];
+ __u32 proto;
+ int i;
+
+ memset(prev, 0, sizeof(prev));
+
+ while (!kill_after_s || time(NULL) - started_at <= kill_after_s) {
+ sleep(STATS_INTERVAL_S);
+
+ for (proto = 0; proto < nr_protos; proto++) {
+ __u64 sum = 0;
+
+ assert(bpf_map_lookup_elem(rxcnt_map_fd, &proto,
+ values) == 0);
+ for (i = 0; i < nr_cpus; i++)
+ sum += (values[i] - prev[proto][i]);
+
+ if (sum)
+ printf("proto %u: sum:%10llu pkts, rate:%10llu pkts/s\n",
+ proto, sum, sum / STATS_INTERVAL_S);
+ memcpy(prev[proto], values, sizeof(values));
+ }
+ }
+}
+
+static void usage(const char *cmd)
+{
+ printf("Start a XDP prog which encapsulates incoming packets\n"
+ "in an IPv4/v6 header and XDP_TX it out. The dst <VIP:PORT>\n"
+ "is used to select packets to encapsulate\n\n");
+ printf("Usage: %s [...]\n", cmd);
+ printf(" -i <ifname|ifindex> Interface\n");
+ printf(" -a <vip-service-address> IPv4 or IPv6\n");
+ printf(" -p <vip-service-port> A port range (e.g. 433-444) is also allowed\n");
+ printf(" -s <source-ip> Used in the IPTunnel header\n");
+ printf(" -d <dest-ip> Used in the IPTunnel header\n");
+ printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n");
+ printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n");
+ printf(" -P <IP-Protocol> Default is TCP\n");
+ printf(" -S use skb-mode\n");
+ printf(" -N enforce native mode\n");
+ printf(" -F Force loading the XDP prog\n");
+ printf(" -h Display this help\n");
+}
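+
+/* Example invocation (illustrative addresses only):
+ *
+ *	./xdp_tx_iptunnel -i eth0 -a 10.10.0.1 -p 443 -s 192.168.1.1 \
+ *		-d 192.168.1.2 -m 52:54:00:12:34:56
+ */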
+
+static int parse_ipstr(const char *ipstr, unsigned int *addr)
+{
+ if (inet_pton(AF_INET6, ipstr, addr) == 1) {
+ return AF_INET6;
+ } else if (inet_pton(AF_INET, ipstr, addr) == 1) {
+ addr[1] = addr[2] = addr[3] = 0;
+ return AF_INET;
+ }
+
+ fprintf(stderr, "%s is an invalid IP\n", ipstr);
+ return AF_UNSPEC;
+}
+
+static int parse_ports(const char *port_str, int *min_port, int *max_port)
+{
+ char *end;
+ long tmp_min_port;
+ long tmp_max_port;
+
+ tmp_min_port = strtol(port_str, &end, 10);
+ if (tmp_min_port < 1 || tmp_min_port > 65535) {
+ fprintf(stderr, "Invalid port(s):%s\n", port_str);
+ return 1;
+ }
+
+ if (*end == '-') {
+ end++;
+ tmp_max_port = strtol(end, NULL, 10);
+ if (tmp_max_port < 1 || tmp_max_port > 65535) {
+ fprintf(stderr, "Invalid port(s):%s\n", optarg);
+ return 1;
+ }
+ } else {
+ tmp_max_port = tmp_min_port;
+ }
+
+ if (tmp_min_port > tmp_max_port) {
+ fprintf(stderr, "Invalid port(s):%s\n", optarg);
+ return 1;
+ }
+
+ if (tmp_max_port - tmp_min_port + 1 > MAX_IPTNL_ENTRIES) {
+ fprintf(stderr, "Port range (%s) is larger than %u\n",
+ port_str, MAX_IPTNL_ENTRIES);
+ return 1;
+ }
+ *min_port = tmp_min_port;
+ *max_port = tmp_max_port;
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int min_port = 0, max_port = 0, vip2tnl_map_fd;
+ const char *optstr = "i:a:p:s:d:m:T:P:FSNh";
+ unsigned char opt_flags[256] = {};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ unsigned int kill_after_s = 0;
+ struct iptnl_info tnl = {};
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct vip vip = {};
+ char filename[256];
+ int opt, prog_fd;
+ int i, err;
+
+ tnl.family = AF_UNSPEC;
+ vip.protocol = IPPROTO_TCP;
+
+ for (i = 0; i < strlen(optstr); i++)
+ if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z')
+ opt_flags[(unsigned char)optstr[i]] = 1;
+
+ while ((opt = getopt(argc, argv, optstr)) != -1) {
+ unsigned short family;
+ unsigned int *v6;
+
+ switch (opt) {
+ case 'i':
+ ifindex = if_nametoindex(optarg);
+ if (!ifindex)
+ ifindex = atoi(optarg);
+ break;
+ case 'a':
+ vip.family = parse_ipstr(optarg, vip.daddr.v6);
+ if (vip.family == AF_UNSPEC)
+ return 1;
+ break;
+ case 'p':
+ if (parse_ports(optarg, &min_port, &max_port))
+ return 1;
+ break;
+ case 'P':
+ vip.protocol = atoi(optarg);
+ break;
+ case 's':
+ case 'd':
+ if (opt == 's')
+ v6 = tnl.saddr.v6;
+ else
+ v6 = tnl.daddr.v6;
+
+ family = parse_ipstr(optarg, v6);
+ if (family == AF_UNSPEC)
+ return 1;
+ if (tnl.family == AF_UNSPEC) {
+ tnl.family = family;
+ } else if (tnl.family != family) {
+ fprintf(stderr,
+ "The IP version of the src and dst addresses used in the IP encapsulation does not match\n");
+ return 1;
+ }
+ break;
+ case 'm':
+ if (!ether_aton_r(optarg,
+ (struct ether_addr *)tnl.dmac)) {
+ fprintf(stderr, "Invalid mac address:%s\n",
+ optarg);
+ return 1;
+ }
+ break;
+ case 'T':
+ kill_after_s = atoi(optarg);
+ break;
+ case 'S':
+ xdp_flags |= XDP_FLAGS_SKB_MODE;
+ break;
+ case 'N':
+ /* default, set below */
+ break;
+ case 'F':
+ xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
+ break;
+ default:
+ usage(argv[0]);
+ return 1;
+ }
+ opt_flags[opt] = 0;
+ }
+
+ if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
+ xdp_flags |= XDP_FLAGS_DRV_MODE;
+
+ for (i = 0; i < strlen(optstr); i++) {
+ if (opt_flags[(unsigned int)optstr[i]]) {
+ fprintf(stderr, "Missing argument -%c\n", optstr[i]);
+ usage(argv[0]);
+ return 1;
+ }
+ }
+
+ if (!ifindex) {
+ fprintf(stderr, "Invalid ifname\n");
+ return 1;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj))
+ return 1;
+
+ prog = bpf_object__next_program(obj, NULL);
+ bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
+
+ err = bpf_object__load(obj);
+ if (err) {
+ printf("bpf_object__load(): %s\n", strerror(errno));
+ return 1;
+ }
+ prog_fd = bpf_program__fd(prog);
+
+ rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
+ vip2tnl_map_fd = bpf_object__find_map_fd_by_name(obj, "vip2tnl");
+ if (vip2tnl_map_fd < 0 || rxcnt_map_fd < 0) {
+ printf("bpf_object__find_map_fd_by_name failed\n");
+ return 1;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ while (min_port <= max_port) {
+ vip.dport = htons(min_port++);
+ if (bpf_map_update_elem(vip2tnl_map_fd, &vip, &tnl,
+ BPF_NOEXIST)) {
+ perror("bpf_map_update_elem(&vip2tnl)");
+ return 1;
+ }
+ }
+
+ if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
+ printf("link set xdp fd failed\n");
+ return 1;
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err) {
+ printf("can't get prog info - %s\n", strerror(errno));
+ return err;
+ }
+ prog_id = info.id;
+
+ poll_stats(kill_after_s);
+
+ bpf_xdp_detach(ifindex, xdp_flags, NULL);
+
+ return 0;
+}
diff --git a/samples/configfs/Makefile b/samples/configfs/Makefile
new file mode 100644
index 000000000..92d661fcb
--- /dev/null
+++ b/samples/configfs/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_CONFIGFS) += configfs_sample.o
diff --git a/samples/configfs/configfs_sample.c b/samples/configfs/configfs_sample.c
new file mode 100644
index 000000000..37a657b25
--- /dev/null
+++ b/samples/configfs/configfs_sample.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * configfs_sample.c - This file is a demonstration module
+ * containing a number of configfs subsystems. It uses the helper
+ * macros defined by configfs.h
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/configfs.h>
+
+/*
+ * 01-childless
+ *
+ * This first example is a childless subsystem. It cannot create
+ * any config_items. It just has attributes.
+ *
+ * Note that we are enclosing the configfs_subsystem inside a container.
+ * This is not necessary if a subsystem has no attributes directly
+ * on the subsystem. See the next example, 02-simple-children, for
+ * such a subsystem.
+ */
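+
+/*
+ * A sketch of poking at this subsystem from userspace, assuming
+ * configfs is mounted at the conventional /sys/kernel/config:
+ *
+ *   # mount -t configfs none /sys/kernel/config
+ *   # cat /sys/kernel/config/01-childless/showme
+ *   # echo 42 > /sys/kernel/config/01-childless/storeme
+ *   # cat /sys/kernel/config/01-childless/description
+ */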
+
+struct childless {
+ struct configfs_subsystem subsys;
+ int showme;
+ int storeme;
+};
+
+static inline struct childless *to_childless(struct config_item *item)
+{
+ return container_of(to_configfs_subsystem(to_config_group(item)),
+ struct childless, subsys);
+}
+
+static ssize_t childless_showme_show(struct config_item *item, char *page)
+{
+ struct childless *childless = to_childless(item);
+ ssize_t pos;
+
+ pos = sprintf(page, "%d\n", childless->showme);
+ childless->showme++;
+
+ return pos;
+}
+
+static ssize_t childless_storeme_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_childless(item)->storeme);
+}
+
+static ssize_t childless_storeme_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct childless *childless = to_childless(item);
+ int ret;
+
+ ret = kstrtoint(page, 10, &childless->storeme);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t childless_description_show(struct config_item *item, char *page)
+{
+ return sprintf(page,
+"[01-childless]\n"
+"\n"
+"The childless subsystem is the simplest possible subsystem in\n"
+"configfs. It does not support the creation of child config_items.\n"
+"It only has a few attributes. In fact, it isn't much different\n"
+"than a directory in /proc.\n");
+}
+
+CONFIGFS_ATTR_RO(childless_, showme);
+CONFIGFS_ATTR(childless_, storeme);
+CONFIGFS_ATTR_RO(childless_, description);
+
+static struct configfs_attribute *childless_attrs[] = {
+ &childless_attr_showme,
+ &childless_attr_storeme,
+ &childless_attr_description,
+ NULL,
+};
+
+static const struct config_item_type childless_type = {
+ .ct_attrs = childless_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct childless childless_subsys = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "01-childless",
+ .ci_type = &childless_type,
+ },
+ },
+ },
+};
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * 02-simple-children
+ *
+ * This example merely has a simple one-attribute child. Note that
+ * there is no extra attribute structure, as the child's attribute is
+ * known from the get-go. Also, there is no container for the
+ * subsystem, as it has no attributes of its own.
+ */
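+
+/*
+ * Usage sketch ("foo" is an arbitrary item name; paths assume the
+ * conventional /sys/kernel/config mount point):
+ *
+ *   # mkdir /sys/kernel/config/02-simple-children/foo
+ *   # echo 7 > /sys/kernel/config/02-simple-children/foo/storeme
+ *   # rmdir /sys/kernel/config/02-simple-children/foo
+ *
+ * mkdir ends up in the ->make_item() defined below; rmdir releases
+ * the item and reaches simple_child_release().
+ */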
+
+struct simple_child {
+ struct config_item item;
+ int storeme;
+};
+
+static inline struct simple_child *to_simple_child(struct config_item *item)
+{
+ return container_of(item, struct simple_child, item);
+}
+
+static ssize_t simple_child_storeme_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", to_simple_child(item)->storeme);
+}
+
+static ssize_t simple_child_storeme_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct simple_child *simple_child = to_simple_child(item);
+ int ret;
+
+ ret = kstrtoint(page, 10, &simple_child->storeme);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+CONFIGFS_ATTR(simple_child_, storeme);
+
+static struct configfs_attribute *simple_child_attrs[] = {
+ &simple_child_attr_storeme,
+ NULL,
+};
+
+static void simple_child_release(struct config_item *item)
+{
+ kfree(to_simple_child(item));
+}
+
+static struct configfs_item_operations simple_child_item_ops = {
+ .release = simple_child_release,
+};
+
+static const struct config_item_type simple_child_type = {
+ .ct_item_ops = &simple_child_item_ops,
+ .ct_attrs = simple_child_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct simple_children {
+ struct config_group group;
+};
+
+static inline struct simple_children *to_simple_children(struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct simple_children, group);
+}
+
+static struct config_item *simple_children_make_item(struct config_group *group,
+ const char *name)
+{
+ struct simple_child *simple_child;
+
+ simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL);
+ if (!simple_child)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&simple_child->item, name,
+ &simple_child_type);
+
+ return &simple_child->item;
+}
+
+static ssize_t simple_children_description_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page,
+"[02-simple-children]\n"
+"\n"
+"This subsystem allows the creation of child config_items. These\n"
+"items have only one attribute that is readable and writeable.\n");
+}
+
+CONFIGFS_ATTR_RO(simple_children_, description);
+
+static struct configfs_attribute *simple_children_attrs[] = {
+ &simple_children_attr_description,
+ NULL,
+};
+
+static void simple_children_release(struct config_item *item)
+{
+ kfree(to_simple_children(item));
+}
+
+static struct configfs_item_operations simple_children_item_ops = {
+ .release = simple_children_release,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations simple_children_group_ops = {
+ .make_item = simple_children_make_item,
+};
+
+static const struct config_item_type simple_children_type = {
+ .ct_item_ops = &simple_children_item_ops,
+ .ct_group_ops = &simple_children_group_ops,
+ .ct_attrs = simple_children_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem simple_children_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "02-simple-children",
+ .ci_type = &simple_children_type,
+ },
+ },
+};
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * 03-group-children
+ *
+ * This example reuses the simple_children group from above. However,
+ * the simple_children group is not the subsystem itself, it is a
+ * child of the subsystem. Creation of a group in the subsystem creates
+ * a new simple_children group. That group can then have simple_child
+ * children of its own.
+ */
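+
+/*
+ * Usage sketch (hypothetical names): each directory made in the
+ * subsystem is itself a simple-children group, so one level of
+ * nesting works:
+ *
+ *   # mkdir /sys/kernel/config/03-group-children/grp
+ *   # mkdir /sys/kernel/config/03-group-children/grp/child
+ *   # echo 3 > /sys/kernel/config/03-group-children/grp/child/storeme
+ */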
+
+static struct config_group *group_children_make_group(
+ struct config_group *group, const char *name)
+{
+ struct simple_children *simple_children;
+
+ simple_children = kzalloc(sizeof(struct simple_children),
+ GFP_KERNEL);
+ if (!simple_children)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&simple_children->group, name,
+ &simple_children_type);
+
+ return &simple_children->group;
+}
+
+static ssize_t group_children_description_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page,
+"[03-group-children]\n"
+"\n"
+"This subsystem allows the creation of child config_groups. These\n"
+"groups are like the subsystem simple-children.\n");
+}
+
+CONFIGFS_ATTR_RO(group_children_, description);
+
+static struct configfs_attribute *group_children_attrs[] = {
+ &group_children_attr_description,
+ NULL,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations group_children_group_ops = {
+ .make_group = group_children_make_group,
+};
+
+static const struct config_item_type group_children_type = {
+ .ct_group_ops = &group_children_group_ops,
+ .ct_attrs = group_children_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem group_children_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "03-group-children",
+ .ci_type = &group_children_type,
+ },
+ },
+};
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * We're now done with our subsystem definitions.
+ * For convenience in this module, here's a list of them all. It
+ * allows the init function to easily register them. Most modules
+ * will only have one subsystem, and will only call register_subsystem
+ * on it directly.
+ */
+static struct configfs_subsystem *example_subsys[] = {
+ &childless_subsys.subsys,
+ &simple_children_subsys,
+ &group_children_subsys,
+ NULL,
+};
+
+static int __init configfs_example_init(void)
+{
+ struct configfs_subsystem *subsys;
+ int ret, i;
+
+ for (i = 0; example_subsys[i]; i++) {
+ subsys = example_subsys[i];
+
+ config_group_init(&subsys->su_group);
+ mutex_init(&subsys->su_mutex);
+ ret = configfs_register_subsystem(subsys);
+ if (ret) {
+ pr_err("Error %d while registering subsystem %s\n",
+ ret, subsys->su_group.cg_item.ci_namebuf);
+ goto out_unregister;
+ }
+ }
+
+ return 0;
+
+out_unregister:
+ for (i--; i >= 0; i--)
+ configfs_unregister_subsystem(example_subsys[i]);
+
+ return ret;
+}
+
+static void __exit configfs_example_exit(void)
+{
+ int i;
+
+ for (i = 0; example_subsys[i]; i++)
+ configfs_unregister_subsystem(example_subsys[i]);
+}
+
+module_init(configfs_example_init);
+module_exit(configfs_example_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/connector/.gitignore b/samples/connector/.gitignore
new file mode 100644
index 000000000..0e26039f3
--- /dev/null
+++ b/samples/connector/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/ucon
diff --git a/samples/connector/Makefile b/samples/connector/Makefile
new file mode 100644
index 000000000..d98a9e047
--- /dev/null
+++ b/samples/connector/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SAMPLE_CONNECTOR) += cn_test.o
+
+userprogs-always-$(CONFIG_CC_CAN_LINK) += ucon
+
+userccflags += -I usr/include
diff --git a/samples/connector/cn_test.c b/samples/connector/cn_test.c
new file mode 100644
index 000000000..0958a171d
--- /dev/null
+++ b/samples/connector/cn_test.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cn_test.c
+ *
+ * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
+ * All rights reserved.
+ */
+
+#define pr_fmt(fmt) "cn_test: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include <linux/connector.h>
+
+static struct cb_id cn_test_id = { CN_NETLINK_USERS + 3, 0x456 };
+static char cn_test_name[] = "cn_test";
+static struct sock *nls;
+static struct timer_list cn_test_timer;
+
+static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+{
+ pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
+ __func__, jiffies, msg->id.idx, msg->id.val,
+ msg->seq, msg->ack, msg->len,
+ msg->len ? (char *)msg->data : "");
+}
+
+/*
+ * Do not remove this function even if no one is using it, as it
+ * is an example of how to get notifications about new connector
+ * user registration.
+ */
+#if 0
+static int cn_test_want_notify(void)
+{
+ struct cn_ctl_msg *ctl;
+ struct cn_notify_req *req;
+ struct cn_msg *msg = NULL;
+ int size, size0;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ u32 group = 1;
+
+ size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req);
+
+ size = NLMSG_SPACE(size0);
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ pr_err("failed to allocate new skb with size=%u\n", size);
+ return -ENOMEM;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ msg = nlmsg_data(nlh);
+
+ memset(msg, 0, size0);
+
+ msg->id.idx = -1;
+ msg->id.val = -1;
+ msg->seq = 0x123;
+ msg->ack = 0x345;
+ msg->len = size0 - sizeof(*msg);
+
+ ctl = (struct cn_ctl_msg *)(msg + 1);
+
+ ctl->idx_notify_num = 1;
+ ctl->val_notify_num = 2;
+ ctl->group = group;
+ ctl->len = msg->len - sizeof(*ctl);
+
+ req = (struct cn_notify_req *)(ctl + 1);
+
+ /*
+ * Idx.
+ */
+ req->first = cn_test_id.idx;
+ req->range = 10;
+
+ /*
+ * Val 0.
+ */
+ req++;
+ req->first = cn_test_id.val;
+ req->range = 10;
+
+ /*
+ * Val 1.
+ */
+ req++;
+ req->first = cn_test_id.val + 20;
+ req->range = 10;
+
+ NETLINK_CB(skb).dst_group = ctl->group;
+ //netlink_broadcast(nls, skb, 0, ctl->group, GFP_ATOMIC);
+ netlink_unicast(nls, skb, 0, 0);
+
+ pr_info("request was sent: group=0x%x\n", ctl->group);
+
+ return 0;
+}
+#endif
+
+static u32 cn_test_timer_counter;
+static void cn_test_timer_func(struct timer_list *unused)
+{
+ struct cn_msg *m;
+ char data[32];
+
+ pr_debug("%s: timer fired\n", __func__);
+
+ m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
+ if (m) {
+ memcpy(&m->id, &cn_test_id, sizeof(m->id));
+ m->seq = cn_test_timer_counter;
+ m->len = scnprintf(data, sizeof(data), "counter = %u",
+ cn_test_timer_counter) + 1;
+
+ memcpy(m + 1, data, m->len);
+
+ cn_netlink_send(m, 0, 0, GFP_ATOMIC);
+ kfree(m);
+ }
+
+ cn_test_timer_counter++;
+
+ mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
+}
+
+static int cn_test_init(void)
+{
+ int err;
+
+ err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
+ if (err)
+ goto err_out;
+ cn_test_id.val++;
+ err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback);
+ if (err) {
+ cn_del_callback(&cn_test_id);
+ goto err_out;
+ }
+
+ timer_setup(&cn_test_timer, cn_test_timer_func, 0);
+ mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
+
+ pr_info("initialized with id={%u.%u}\n",
+ cn_test_id.idx, cn_test_id.val);
+
+ return 0;
+
+ err_out:
+ if (nls && nls->sk_socket)
+ sock_release(nls->sk_socket);
+
+ return err;
+}
+
+static void cn_test_fini(void)
+{
+ del_timer_sync(&cn_test_timer);
+ cn_del_callback(&cn_test_id);
+ cn_test_id.val--;
+ cn_del_callback(&cn_test_id);
+ if (nls && nls->sk_socket)
+ sock_release(nls->sk_socket);
+}
+
+module_init(cn_test_init);
+module_exit(cn_test_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Connector's test module");
diff --git a/samples/connector/ucon.c b/samples/connector/ucon.c
new file mode 100644
index 000000000..fa17f8642
--- /dev/null
+++ b/samples/connector/ucon.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ucon.c
+ *
+ * Copyright (c) 2004+ Evgeniy Polyakov <zbr@ioremap.net>
+ */
+
+#include <asm/types.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/poll.h>
+
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
+#include <arpa/inet.h>
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <getopt.h>
+
+#include <linux/connector.h>
+
+#define DEBUG
+#define NETLINK_CONNECTOR 11
+
+/* Hopefully your userspace connector.h matches this kernel */
+#define CN_TEST_IDX (CN_NETLINK_USERS + 3)
+#define CN_TEST_VAL 0x456
+
+#ifdef DEBUG
+#define ulog(f, a...) fprintf(stdout, f, ##a)
+#else
+#define ulog(f, a...) do {} while (0)
+#endif
+
+static int need_exit;
+static __u32 seq;
+
+static int netlink_send(int s, struct cn_msg *msg)
+{
+ struct nlmsghdr *nlh;
+ unsigned int size;
+ int err;
+ char buf[128];
+ struct cn_msg *m;
+
+ size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
+
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_seq = seq++;
+ nlh->nlmsg_pid = getpid();
+ nlh->nlmsg_type = NLMSG_DONE;
+ nlh->nlmsg_len = size;
+ nlh->nlmsg_flags = 0;
+
+ m = NLMSG_DATA(nlh);
+#if 0
+ ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
+ __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
+#endif
+ memcpy(m, msg, sizeof(*m) + msg->len);
+
+ err = send(s, nlh, size, 0);
+ if (err == -1)
+ ulog("Failed to send: %s [%d].\n",
+ strerror(errno), errno);
+
+ return err;
+}
+
+static void usage(void)
+{
+ printf(
+ "Usage: ucon [options] [output file]\n"
+ "\n"
+ "\t-h\tthis help screen\n"
+ "\t-s\tsend buffers to the test module\n"
+ "\n"
+ "The default behavior of ucon is to subscribe to the test module\n"
+ "and wait for state messages. Any ones received are dumped to the\n"
+ "specified output file (or stdout). The test module is assumed to\n"
+ "have an id of {%u.%u}\n"
+ "\n"
+ "If you get no output, then verify the cn_test module id matches\n"
+ "the expected id above.\n"
+ , CN_TEST_IDX, CN_TEST_VAL
+ );
+}
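+
+/*
+ * Typical sessions (illustrative): with the cn_test module loaded on
+ * the kernel side, either listen for its periodic timer messages:
+ *
+ *   $ ./ucon /tmp/cn_test.log
+ *
+ * or flood the module with test messages and watch the kernel log:
+ *
+ *   $ ./ucon -s
+ */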
+
+int main(int argc, char *argv[])
+{
+ int s;
+ char buf[1024];
+ int len;
+ struct nlmsghdr *reply;
+ struct sockaddr_nl l_local;
+ struct cn_msg *data;
+ FILE *out;
+ time_t tm;
+ struct pollfd pfd;
+ bool send_msgs = false;
+
+ while ((s = getopt(argc, argv, "hs")) != -1) {
+ switch (s) {
+ case 's':
+ send_msgs = true;
+ break;
+
+ case 'h':
+ usage();
+ return 0;
+
+ default:
+ /* getopt() outputs an error for us */
+ usage();
+ return 1;
+ }
+ }
+
+ if (argc != optind) {
+ out = fopen(argv[optind], "a+");
+ if (!out) {
+ ulog("Unable to open %s for writing: %s\n",
+ argv[1], strerror(errno));
+ out = stdout;
+ }
+ } else
+ out = stdout;
+
+ memset(buf, 0, sizeof(buf));
+
+ s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+ if (s == -1) {
+ perror("socket");
+ return -1;
+ }
+
+ l_local.nl_family = AF_NETLINK;
+ l_local.nl_groups = -1; /* bitmask of requested groups */
+ l_local.nl_pid = 0;
+
+ ulog("subscribing to %u.%u\n", CN_TEST_IDX, CN_TEST_VAL);
+
+ if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
+ perror("bind");
+ close(s);
+ return -1;
+ }
+
+#if 0
+ {
+ int on = 0x57; /* Additional group number */
+ setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
+ }
+#endif
+ if (send_msgs) {
+ int i, j;
+
+ memset(buf, 0, sizeof(buf));
+
+ data = (struct cn_msg *)buf;
+
+ data->id.idx = CN_TEST_IDX;
+ data->id.val = CN_TEST_VAL;
+ data->seq = seq++;
+ data->ack = 0;
+ data->len = 0;
+
+ for (j = 0; j < 10; ++j) {
+ for (i = 0; i < 1000; ++i) {
+ len = netlink_send(s, data);
+ }
+
+ ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
+ }
+
+ return 0;
+ }
+
+ pfd.fd = s;
+
+ while (!need_exit) {
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+ switch (poll(&pfd, 1, -1)) {
+ case 0:
+ need_exit = 1;
+ break;
+ case -1:
+ if (errno != EINTR) {
+ need_exit = 1;
+ break;
+ }
+ continue;
+ }
+ if (need_exit)
+ break;
+
+ memset(buf, 0, sizeof(buf));
+ len = recv(s, buf, sizeof(buf), 0);
+ if (len == -1) {
+ perror("recv buf");
+ close(s);
+ return -1;
+ }
+ reply = (struct nlmsghdr *)buf;
+
+ switch (reply->nlmsg_type) {
+ case NLMSG_ERROR:
+ fprintf(out, "Error message received.\n");
+ fflush(out);
+ break;
+ case NLMSG_DONE:
+ data = (struct cn_msg *)NLMSG_DATA(reply);
+
+ time(&tm);
+ fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
+ ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
+ fflush(out);
+ break;
+ default:
+ break;
+ }
+ }
+
+ close(s);
+ return 0;
+}
diff --git a/samples/coresight/Makefile b/samples/coresight/Makefile
new file mode 100644
index 000000000..b3fce4af2
--- /dev/null
+++ b/samples/coresight/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight-cfg-sample.o
+ccflags-y += -I$(srctree)/drivers/hwtracing/coresight
diff --git a/samples/coresight/coresight-cfg-sample.c b/samples/coresight/coresight-cfg-sample.c
new file mode 100644
index 000000000..25485c80b
--- /dev/null
+++ b/samples/coresight/coresight-cfg-sample.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(C) 2020 Linaro Limited. All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include "coresight-config.h"
+#include "coresight-syscfg.h"
+
+/* create an alternate autofdo configuration */
+
+/* we will provide 4 sets of preset parameter values */
+#define AFDO2_NR_PRESETS 4
+/* the total number of parameters in used features - strobing has 2 */
+#define AFDO2_NR_PARAM_SUM 2
+
+static const char *afdo2_ref_names[] = {
+ "strobing",
+};
+
+/*
+ * set of presets leaves strobing window constant while varying period to allow
+ * experimentation with mark / space ratios for various workloads
+ */
+static u64 afdo2_presets[AFDO2_NR_PRESETS][AFDO2_NR_PARAM_SUM] = {
+ { 1000, 100 },
+ { 1000, 1000 },
+ { 1000, 5000 },
+ { 1000, 10000 },
+};
+
+struct cscfg_config_desc afdo2 = {
+ .name = "autofdo2",
+ .description = "Setup ETMs with strobing for autofdo\n"
+ "Supplied presets allow experimentation with mark-space ratio for various loads\n",
+ .nr_feat_refs = ARRAY_SIZE(afdo2_ref_names),
+ .feat_ref_names = afdo2_ref_names,
+ .nr_presets = AFDO2_NR_PRESETS,
+ .nr_total_params = AFDO2_NR_PARAM_SUM,
+ .presets = &afdo2_presets[0][0],
+};
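+
+/*
+ * Sketch of selecting this configuration from perf, assuming the
+ * usual CoreSight event syntax (preset=1..4 picks a row from the
+ * table above; preset=0 uses the parameter defaults):
+ *
+ *   perf record -e cs_etm/autofdo2,preset=1/ -- <workload>
+ */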
+
+static struct cscfg_feature_desc *sample_feats[] = {
+ NULL
+};
+
+static struct cscfg_config_desc *sample_cfgs[] = {
+ &afdo2,
+ NULL
+};
+
+static struct cscfg_load_owner_info mod_owner = {
+ .type = CSCFG_OWNER_MODULE,
+ .owner_handle = THIS_MODULE,
+};
+
+/* module init and exit - just load and unload configs */
+static int __init cscfg_sample_init(void)
+{
+ return cscfg_load_config_sets(sample_cfgs, sample_feats, &mod_owner);
+}
+
+static void __exit cscfg_sample_exit(void)
+{
+ cscfg_unload_config_sets(&mod_owner);
+}
+
+module_init(cscfg_sample_init);
+module_exit(cscfg_sample_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mike Leach <mike.leach@linaro.org>");
+MODULE_DESCRIPTION("CoreSight Syscfg Example");
diff --git a/samples/fanotify/.gitignore b/samples/fanotify/.gitignore
new file mode 100644
index 000000000..d74593e8b
--- /dev/null
+++ b/samples/fanotify/.gitignore
@@ -0,0 +1 @@
+fs-monitor
diff --git a/samples/fanotify/Makefile b/samples/fanotify/Makefile
new file mode 100644
index 000000000..e20db1bdd
--- /dev/null
+++ b/samples/fanotify/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs-always-y += fs-monitor
+
+userccflags += -I usr/include -Wall
+
diff --git a/samples/fanotify/fs-monitor.c b/samples/fanotify/fs-monitor.c
new file mode 100644
index 000000000..608db24c4
--- /dev/null
+++ b/samples/fanotify/fs-monitor.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021, Collabora Ltd.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <err.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/fanotify.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef FAN_FS_ERROR
+#define FAN_FS_ERROR 0x00008000
+#define FAN_EVENT_INFO_TYPE_ERROR 5
+
+struct fanotify_event_info_error {
+ struct fanotify_event_info_header hdr;
+ __s32 error;
+ __u32 error_count;
+};
+#endif
+
+#ifndef FILEID_INO32_GEN
+#define FILEID_INO32_GEN 1
+#endif
+
+#ifndef FILEID_INVALID
+#define FILEID_INVALID 0xff
+#endif
+
+static void print_fh(struct file_handle *fh)
+{
+ int i;
+ uint32_t *h = (uint32_t *) fh->f_handle;
+
+ printf("\tfh: ");
+ for (i = 0; i < fh->handle_bytes; i++)
+ printf("%hhx", fh->f_handle[i]);
+ printf("\n");
+
+ printf("\tdecoded fh: ");
+ if (fh->handle_type == FILEID_INO32_GEN)
+ printf("inode=%u gen=%u\n", h[0], h[1]);
+ else if (fh->handle_type == FILEID_INVALID && !fh->handle_bytes)
+ printf("Type %d (Superblock error)\n", fh->handle_type);
+ else
+ printf("Type %d (Unknown)\n", fh->handle_type);
+
+}
+
+static void handle_notifications(char *buffer, int len)
+{
+ struct fanotify_event_metadata *event =
+ (struct fanotify_event_metadata *) buffer;
+ struct fanotify_event_info_header *info;
+ struct fanotify_event_info_error *err;
+ struct fanotify_event_info_fid *fid;
+ int off;
+
+ for (; FAN_EVENT_OK(event, len); event = FAN_EVENT_NEXT(event, len)) {
+
+ if (event->mask != FAN_FS_ERROR) {
+ printf("unexpected FAN MARK: %llx\n",
+ (unsigned long long)event->mask);
+ goto next_event;
+ }
+
+ if (event->fd != FAN_NOFD) {
+ printf("Unexpected fd (!= FAN_NOFD)\n");
+ goto next_event;
+ }
+
+ printf("FAN_FS_ERROR (len=%d)\n", event->event_len);
+
+ for (off = sizeof(*event) ; off < event->event_len;
+ off += info->len) {
+ info = (struct fanotify_event_info_header *)
+ ((char *) event + off);
+
+ switch (info->info_type) {
+ case FAN_EVENT_INFO_TYPE_ERROR:
+ err = (struct fanotify_event_info_error *) info;
+
+ printf("\tGeneric Error Record: len=%d\n",
+ err->hdr.len);
+ printf("\terror: %d\n", err->error);
+ printf("\terror_count: %d\n", err->error_count);
+ break;
+
+ case FAN_EVENT_INFO_TYPE_FID:
+ fid = (struct fanotify_event_info_fid *) info;
+
+ printf("\tfsid: %x%x\n",
+ fid->fsid.val[0], fid->fsid.val[1]);
+ print_fh((struct file_handle *) &fid->handle);
+ break;
+
+ default:
+ printf("\tUnknown info type=%d len=%d:\n",
+ info->info_type, info->len);
+ }
+ }
+next_event:
+ printf("---\n\n");
+ }
+}
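+
+/*
+ * Example run (illustrative mount point): watch a filesystem for
+ * FAN_FS_ERROR events and dump each record as it arrives:
+ *
+ *   # ./fs-monitor /mnt
+ */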
+
+int main(int argc, char **argv)
+{
+ int fd;
+
+ char buffer[BUFSIZ];
+
+ if (argc < 2) {
+ printf("Missing path argument\n");
+ return 1;
+ }
+
+ fd = fanotify_init(FAN_CLASS_NOTIF|FAN_REPORT_FID, O_RDONLY);
+ if (fd < 0)
+ errx(1, "fanotify_init");
+
+ if (fanotify_mark(fd, FAN_MARK_ADD|FAN_MARK_FILESYSTEM,
+ FAN_FS_ERROR, AT_FDCWD, argv[1])) {
+ errx(1, "fanotify_mark");
+ }
+
+ while (1) {
+ int n = read(fd, buffer, BUFSIZ);
+
+ if (n < 0)
+ errx(1, "read");
+
+ handle_notifications(buffer, n);
+ }
+
+ return 0;
+}
diff --git a/samples/fprobe/Makefile b/samples/fprobe/Makefile
new file mode 100644
index 000000000..ecccbfa6e
--- /dev/null
+++ b/samples/fprobe/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe_example.o
diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c
new file mode 100644
index 000000000..dd794990a
--- /dev/null
+++ b/samples/fprobe/fprobe_example.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Here's a sample kernel module showing the use of fprobe to dump a
+ * stack trace and selected registers when kernel_clone() is called.
+ *
+ * For more information on theory of operation of kprobes, see
+ * Documentation/trace/kprobes.rst
+ *
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever kernel_clone() is invoked to create a new process.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fprobe.h>
+#include <linux/sched/debug.h>
+#include <linux/slab.h>
+
+#define BACKTRACE_DEPTH 16
+#define MAX_SYMBOL_LEN 4096
+static struct fprobe sample_probe;
+static unsigned long nhit;
+
+static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
+module_param_string(symbol, symbol, sizeof(symbol), 0644);
+MODULE_PARM_DESC(symbol, "Probed symbol(s), given by comma separated symbols or a wildcard pattern.");
+
+static char nosymbol[MAX_SYMBOL_LEN] = "";
+module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
+MODULE_PARM_DESC(nosymbol, "Not-probed symbols, given by a wildcard pattern.");
+
+static bool stackdump = true;
+module_param(stackdump, bool, 0644);
+MODULE_PARM_DESC(stackdump, "Enable stackdump.");
+
+static bool use_trace = false;
+module_param(use_trace, bool, 0644);
+MODULE_PARM_DESC(use_trace, "Use trace_printk instead of printk. This is only for debugging.");
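+
+/*
+ * Example module loads (the symbol lists are illustrative; any
+ * functions resolvable by ftrace will do):
+ *
+ *   # insmod fprobe_example.ko symbol=kernel_clone,schedule_timeout
+ *   # insmod fprobe_example.ko symbol='dev_*' nosymbol='dev_kfree_*'
+ */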
+
+static void show_backtrace(void)
+{
+ unsigned long stacks[BACKTRACE_DEPTH];
+ unsigned int len;
+
+ len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2);
+ stack_trace_print(stacks, len, 24);
+}
+
+static void sample_entry_handler(struct fprobe *fp, unsigned long ip,
+ struct pt_regs *regs, void *data)
+{
+ if (use_trace)
+ /*
+ * This is just an example, no kernel code should call
+ * trace_printk() except when actively debugging.
+ */
+ trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ else
+ pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ nhit++;
+ if (stackdump)
+ show_backtrace();
+}
+
+static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs,
+ void *data)
+{
+ unsigned long rip = instruction_pointer(regs);
+
+ if (use_trace)
+ /*
+ * This is just an example, no kernel code should call
+ * trace_printk() except when actively debugging.
+ */
+ trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+ (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ else
+ pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+ (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ nhit++;
+ if (stackdump)
+ show_backtrace();
+}
+
+static int __init fprobe_init(void)
+{
+ char *p, *symbuf = NULL;
+ const char **syms;
+ int ret, count, i;
+
+ sample_probe.entry_handler = sample_entry_handler;
+ sample_probe.exit_handler = sample_exit_handler;
+
+ if (strchr(symbol, '*')) {
+ /* filter based fprobe */
+ ret = register_fprobe(&sample_probe, symbol,
+ nosymbol[0] == '\0' ? NULL : nosymbol);
+ goto out;
+ } else if (!strchr(symbol, ',')) {
+ symbuf = symbol;
+ ret = register_fprobe_syms(&sample_probe, (const char **)&symbuf, 1);
+ goto out;
+ }
+
+ /* Comma separated symbols */
+ symbuf = kstrdup(symbol, GFP_KERNEL);
+ if (!symbuf)
+ return -ENOMEM;
+ p = symbuf;
+ count = 1;
+ while ((p = strchr(++p, ',')) != NULL)
+ count++;
+
+ pr_info("%d symbols found\n", count);
+
+ syms = kcalloc(count, sizeof(char *), GFP_KERNEL);
+ if (!syms) {
+ kfree(symbuf);
+ return -ENOMEM;
+ }
+
+ p = symbuf;
+ for (i = 0; i < count; i++)
+ syms[i] = strsep(&p, ",");
+
+ ret = register_fprobe_syms(&sample_probe, syms, count);
+ kfree(syms);
+ kfree(symbuf);
+out:
+ if (ret < 0)
+ pr_err("register_fprobe failed, returned %d\n", ret);
+ else
+ pr_info("Planted fprobe at %s\n", symbol);
+
+ return ret;
+}
+
+static void __exit fprobe_exit(void)
+{
+ unregister_fprobe(&sample_probe);
+
+ pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n",
+ symbol, nhit, sample_probe.nmissed);
+}
+
+module_init(fprobe_init)
+module_exit(fprobe_exit)
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/Makefile b/samples/ftrace/Makefile
new file mode 100644
index 000000000..faf8cdb79
--- /dev/null
+++ b/samples/ftrace/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
+
+CFLAGS_sample-trace-array.o := -I$(src)
+obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
new file mode 100644
index 000000000..39146fa83
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func1(void);
+extern void my_direct_func2(void);
+
+void my_direct_func1(void)
+{
+ trace_printk("my direct func1\n");
+}
+
+void my_direct_func2(void)
+{
+ trace_printk("my direct func2\n");
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+static unsigned long my_ip = (unsigned long)schedule;
+
+#ifdef CONFIG_X86_64
+
+#include <asm/ibt.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" call my_direct_func1\n"
+" leave\n"
+" .size my_tramp1, .-my_tramp1\n"
+ ASM_RET
+
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" call my_direct_func2\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" brasl %r14,my_direct_func1\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp1, .-my_tramp1\n"
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" brasl %r14,my_direct_func2\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+ (unsigned long)my_tramp1,
+ (unsigned long)my_tramp2,
+};
+
+static int simple_thread(void *arg)
+{
+ static int t;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
+
+ if (ret)
+ continue;
+ t ^= 1;
+ ret = modify_ftrace_direct(my_ip, my_tramp, tramps[t]);
+ if (!ret)
+ my_tramp = tramps[t];
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_init(void)
+{
+ int ret;
+
+ ret = register_ftrace_direct(my_ip, my_tramp);
+ if (!ret)
+ simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+ return ret;
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ kthread_stop(simple_tsk);
+ unregister_ftrace_direct(my_ip, my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
new file mode 100644
index 000000000..6f23b1413
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-multi-modify.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func1(unsigned long ip);
+extern void my_direct_func2(unsigned long ip);
+
+void my_direct_func1(unsigned long ip)
+{
+ trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+ trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+#include <asm/ibt.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" movq 8(%rbp), %rdi\n"
+" call my_direct_func1\n"
+" popq %rdi\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp1, .-my_tramp1\n"
+
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" movq 8(%rbp), %rdi\n"
+" call my_direct_func2\n"
+" popq %rdi\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func1\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp1, .-my_tramp1\n"
+"\n"
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func2\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+ (unsigned long)my_tramp1,
+ (unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+ static int t;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
+
+ if (ret)
+ continue;
+ t ^= 1;
+ ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+ if (!ret)
+ my_tramp = tramps[t];
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+ int ret;
+
+ ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+ ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+ ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+ if (!ret)
+ simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+ return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+ kthread_stop(simple_tsk);
+ unregister_ftrace_direct_multi(&direct, my_tramp);
+ ftrace_free_filter(&direct);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
new file mode 100644
index 000000000..a9a5c90fb
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-multi.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include <linux/mm.h> /* for handle_mm_fault() */
+#include <linux/ftrace.h>
+#include <linux/sched/stat.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func(unsigned long ip);
+
+void my_direct_func(unsigned long ip)
+{
+ trace_printk("ip %lx\n", ip);
+}
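+
+/*
+ * With a multi direct ops a single trampoline serves several probed
+ * functions; the ip argument (recovered from the return address in
+ * my_tramp below) tells us which one -- wake_up_process() or
+ * schedule() -- was actually hit.
+ */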
+
+extern void my_tramp(void *);
+
+#ifdef CONFIG_X86_64
+
+#include <asm/ibt.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" movq 8(%rbp), %rdi\n"
+" call my_direct_func\n"
+" popq %rdi\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static struct ftrace_ops direct;
+
+static int __init ftrace_direct_multi_init(void)
+{
+ ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+ ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+ return register_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+ unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
+ ftrace_free_filter(&direct);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using register_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
new file mode 100644
index 000000000..14c6859e1
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-too.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include <linux/mm.h> /* for handle_mm_fault() */
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, struct pt_regs *regs);
+
+void my_direct_func(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, struct pt_regs *regs)
+{
+ trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n",
+ vma, address, flags, regs);
+}
+
+extern void my_tramp(void *);
+
+#ifdef CONFIG_X86_64
+
+#include <asm/ibt.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" pushq %rsi\n"
+" pushq %rdx\n"
+" pushq %rcx\n"
+" call my_direct_func\n"
+" popq %rcx\n"
+" popq %rdx\n"
+" popq %rsi\n"
+" popq %rdi\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" brasl %r14,my_direct_func\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static int __init ftrace_direct_init(void)
+{
+ return register_ftrace_direct((unsigned long)handle_mm_fault,
+ (unsigned long)my_tramp);
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ unregister_ftrace_direct((unsigned long)handle_mm_fault,
+ (unsigned long)my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Another example use case of using register_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
new file mode 100644
index 000000000..e8f1e440b
--- /dev/null
+++ b/samples/ftrace/ftrace-direct.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include <linux/sched.h> /* for wake_up_process() */
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+extern void my_direct_func(struct task_struct *p);
+
+void my_direct_func(struct task_struct *p)
+{
+ trace_printk("waking up %s-%d\n", p->comm, p->pid);
+}
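+
+/*
+ * The trace_printk() output above lands in the ftrace ring buffer.
+ * Assuming tracefs is mounted in the usual place, it can be watched
+ * with:
+ *
+ *   # cat /sys/kernel/tracing/trace_pipe
+ */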
+
+extern void my_tramp(void *);
+
+#ifdef CONFIG_X86_64
+
+#include <asm/ibt.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+ ASM_ENDBR
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" call my_direct_func\n"
+" popq %rdi\n"
+" leave\n"
+ ASM_RET
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" brasl %r14,my_direct_func\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static int __init ftrace_direct_init(void)
+{
+ return register_ftrace_direct((unsigned long)wake_up_process,
+ (unsigned long)my_tramp);
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ unregister_ftrace_direct((unsigned long)wake_up_process,
+ (unsigned long)my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Example use case of using register_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c
new file mode 100644
index 000000000..6aba02a31
--- /dev/null
+++ b/samples/ftrace/sample-trace-array.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/trace.h>
+#include <linux/trace_events.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+/*
+ * Any file that uses tracepoints must include the header. But
+ * exactly one file must include it after defining
+ * CREATE_TRACE_POINTS; that is what generates the C code which
+ * creates the handles for the tracepoints.
+ */
+#define CREATE_TRACE_POINTS
+#include "sample-trace-array.h"
+
+struct trace_array *tr;
+static void mytimer_handler(struct timer_list *unused);
+static struct task_struct *simple_tsk;
+
+static void trace_work_fn(struct work_struct *work)
+{
+ /*
+ * Disable tracing for event "sample_event".
+ */
+ trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
+ false);
+}
+static DECLARE_WORK(trace_work, trace_work_fn);
+
+/*
+ * mytimer: Timer set up to disable tracing for event "sample_event". This
+ * timer exists only so the sample module can demonstrate accessing
+ * Ftrace instances from within the kernel.
+ */
+static DEFINE_TIMER(mytimer, mytimer_handler);
+
+static void mytimer_handler(struct timer_list *unused)
+{
+ schedule_work(&trace_work);
+}
+
+static void simple_thread_func(int count)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /*
+ * Printing count value using trace_array_printk() - trace_printk()
+ * equivalent for the instance buffers.
+ */
+ trace_array_printk(tr, _THIS_IP_, "trace_array_printk: count=%d\n",
+ count);
+ /*
+ * Tracepoint for event "sample_event". This will print the
+ * current value of count and current jiffies.
+ */
+ trace_sample_event(count, jiffies);
+}
+
+static int simple_thread(void *arg)
+{
+ int count = 0;
+ unsigned long delay = msecs_to_jiffies(5000);
+
+ /*
+ * Enable tracing for "sample_event".
+ */
+ trace_array_set_clr_event(tr, "sample-subsystem", "sample_event", true);
+
+ /*
+ * Add the timer - mytimer. It fires after `delay' jiffies
+ * (5 seconds here) and schedules the work that disables tracing.
+ */
+ add_timer(&mytimer);
+ mod_timer(&mytimer, jiffies + delay);
+
+ while (!kthread_should_stop())
+ simple_thread_func(count++);
+
+ del_timer(&mytimer);
+ cancel_work_sync(&trace_work);
+
+ /*
+ * trace_array_put() decrements the reference counter associated with
+ * the trace array - "tr". We are done using the trace array, hence
+ * decrement the reference counter so that it can be destroyed using
+ * trace_array_destroy().
+ */
+ trace_array_put(tr);
+
+ return 0;
+}
+
+static int __init sample_trace_array_init(void)
+{
+ /*
+ * Return a pointer to the trace array with name "sample-instance" if it
+ * exists, else create a new trace array.
+ *
+ * NOTE: This function increments the reference counter
+ * associated with the trace array - "tr".
+ */
+ tr = trace_array_get_by_name("sample-instance");
+
+ if (!tr)
+ return -1;
+ /*
+ * Allocate the per-cpu buffers used by trace_array_printk() if
+ * they haven't been allocated already.
+ */
+ trace_printk_init_buffers();
+
+ simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
+ if (IS_ERR(simple_tsk)) {
+ trace_array_put(tr);
+ trace_array_destroy(tr);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __exit sample_trace_array_exit(void)
+{
+ kthread_stop(simple_tsk);
+
+ /*
+ * We are unloading our module and no longer require the trace array.
+ * Remove/destroy "tr" using trace_array_destroy()
+ */
+ trace_array_destroy(tr);
+}
+
+module_init(sample_trace_array_init);
+module_exit(sample_trace_array_exit);
+
+MODULE_AUTHOR("Divya Indi");
+MODULE_DESCRIPTION("Sample module for kernel access to Ftrace instances");
+MODULE_LICENSE("GPL");
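+
+/*
+ * Once loaded, the instance created above typically shows up under
+ * tracefs as /sys/kernel/tracing/instances/sample-instance; the
+ * sample_event and trace_array_printk() output can be read from that
+ * instance's "trace" file.
+ */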
diff --git a/samples/ftrace/sample-trace-array.h b/samples/ftrace/sample-trace-array.h
new file mode 100644
index 000000000..6f8962428
--- /dev/null
+++ b/samples/ftrace/sample-trace-array.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sample-subsystem.h
+ *
+ * If the header name will be different than the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called sample-trace-array.h but we want the system
+ * to be called "sample-subsystem". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE sample-trace-array
+ *
+ * As we do in the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sample-subsystem
+
+/*
+ * TRACE_SYSTEM is expected to be a valid C variable name (alpha-numeric
+ * and underscore), although it may start with numbers. If for some
+ * reason it is not (here it contains hyphens), you need to add the
+ * following lines:
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR sample_subsystem
+
+/*
+ * But the above is only needed if TRACE_SYSTEM is not made up solely
+ * of alpha-numerics and underscores. By default, TRACE_SYSTEM_VAR will be equal to
+ * TRACE_SYSTEM. As TRACE_SYSTEM_VAR must be alpha-numeric, if
+ * TRACE_SYSTEM is not, then TRACE_SYSTEM_VAR must be defined with
+ * only alpha-numeric and underscores.
+ *
+ * The TRACE_SYSTEM_VAR is only used internally and not visible to
+ * user space.
+ */
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_SAMPLE_TRACE_ARRAY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SAMPLE_TRACE_ARRAY_H
+
+#include <linux/tracepoint.h>
+TRACE_EVENT(sample_event,
+
+ TP_PROTO(int count, unsigned long time),
+
+ TP_ARGS(count, time),
+
+ TP_STRUCT__entry(
+ __field(int, count)
+ __field(unsigned long, time)
+ ),
+
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->time = time;
+ ),
+
+ TP_printk("count value=%d at jiffies=%lu", __entry->count,
+ __entry->time)
+ );
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sample-trace-array
+#include <trace/define_trace.h>
diff --git a/samples/hidraw/.gitignore b/samples/hidraw/.gitignore
new file mode 100644
index 000000000..5233ab632
--- /dev/null
+++ b/samples/hidraw/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/hid-example
diff --git a/samples/hidraw/Makefile b/samples/hidraw/Makefile
new file mode 100644
index 000000000..594d989e5
--- /dev/null
+++ b/samples/hidraw/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += hid-example
+
+userccflags += -I usr/include
diff --git a/samples/hidraw/hid-example.c b/samples/hidraw/hid-example.c
new file mode 100644
index 000000000..0f73ace3c
--- /dev/null
+++ b/samples/hidraw/hid-example.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hidraw Userspace Example
+ *
+ * Copyright (c) 2010 Alan Ott <alan@signal11.us>
+ * Copyright (c) 2010 Signal 11 Software
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using hidraw.
+ */
+
+/* Linux */
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/hidraw.h>
+
+/*
+ * Ugly hack to work around compilation failures on systems whose
+ * userspace kernel headers do not yet carry the new version of hidraw.h.
+ */
+#ifndef HIDIOCSFEATURE
+#warning Please have your distro update the userspace kernel headers
+#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
+#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
+#endif
+
+/* Unix */
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+/* C */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+const char *bus_str(int bus);
+
+int main(int argc, char **argv)
+{
+ int fd;
+ int i, res, desc_size = 0;
+ char buf[256];
+ struct hidraw_report_descriptor rpt_desc;
+ struct hidraw_devinfo info;
+ char *device = "/dev/hidraw0";
+
+ if (argc > 1)
+ device = argv[1];
+
+	/*
+	 * Open the device with non-blocking reads. In real life, don't
+	 * use a hard-coded path; use libudev instead (see the sketch
+	 * below).
+	 */
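+	/*
+	 * A sketch of the libudev approach (assumes libudev is available;
+	 * #include <libudev.h> and link with -ludev; not compiled here):
+	 *
+	 *	struct udev *udev = udev_new();
+	 *	struct udev_enumerate *en = udev_enumerate_new(udev);
+	 *	struct udev_list_entry *entry;
+	 *
+	 *	udev_enumerate_add_match_subsystem(en, "hidraw");
+	 *	udev_enumerate_scan_devices(en);
+	 *	udev_list_entry_foreach(entry,
+	 *				udev_enumerate_get_list_entry(en)) {
+	 *		struct udev_device *dev = udev_device_new_from_syspath(
+	 *			udev, udev_list_entry_get_name(entry));
+	 *		(udev_device_get_devnode(dev) yields "/dev/hidrawN")
+	 *		udev_device_unref(dev);
+	 *	}
+	 *	udev_enumerate_unref(en);
+	 *	udev_unref(udev);
+	 */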
+ fd = open(device, O_RDWR|O_NONBLOCK);
+
+ if (fd < 0) {
+ perror("Unable to open device");
+ return 1;
+ }
+
+ memset(&rpt_desc, 0x0, sizeof(rpt_desc));
+ memset(&info, 0x0, sizeof(info));
+ memset(buf, 0x0, sizeof(buf));
+
+ /* Get Report Descriptor Size */
+ res = ioctl(fd, HIDIOCGRDESCSIZE, &desc_size);
+ if (res < 0)
+ perror("HIDIOCGRDESCSIZE");
+ else
+ printf("Report Descriptor Size: %d\n", desc_size);
+
+ /* Get Report Descriptor */
+ rpt_desc.size = desc_size;
+ res = ioctl(fd, HIDIOCGRDESC, &rpt_desc);
+ if (res < 0) {
+ perror("HIDIOCGRDESC");
+ } else {
+ printf("Report Descriptor:\n");
+ for (i = 0; i < rpt_desc.size; i++)
+ printf("%hhx ", rpt_desc.value[i]);
+ puts("\n");
+ }
+
+ /* Get Raw Name */
+ res = ioctl(fd, HIDIOCGRAWNAME(256), buf);
+ if (res < 0)
+ perror("HIDIOCGRAWNAME");
+ else
+ printf("Raw Name: %s\n", buf);
+
+ /* Get Physical Location */
+ res = ioctl(fd, HIDIOCGRAWPHYS(256), buf);
+ if (res < 0)
+ perror("HIDIOCGRAWPHYS");
+ else
+ printf("Raw Phys: %s\n", buf);
+
+ /* Get Raw Info */
+ res = ioctl(fd, HIDIOCGRAWINFO, &info);
+ if (res < 0) {
+ perror("HIDIOCGRAWINFO");
+ } else {
+ printf("Raw Info:\n");
+ printf("\tbustype: %d (%s)\n",
+ info.bustype, bus_str(info.bustype));
+ printf("\tvendor: 0x%04hx\n", info.vendor);
+ printf("\tproduct: 0x%04hx\n", info.product);
+ }
+
+ /* Set Feature */
+ buf[0] = 0x9; /* Report Number */
+ buf[1] = 0xff;
+ buf[2] = 0xff;
+ buf[3] = 0xff;
+ res = ioctl(fd, HIDIOCSFEATURE(4), buf);
+ if (res < 0)
+ perror("HIDIOCSFEATURE");
+ else
+ printf("ioctl HIDIOCSFEATURE returned: %d\n", res);
+
+ /* Get Feature */
+ buf[0] = 0x9; /* Report Number */
+ res = ioctl(fd, HIDIOCGFEATURE(256), buf);
+ if (res < 0) {
+ perror("HIDIOCGFEATURE");
+ } else {
+ printf("ioctl HIDIOCGFEATURE returned: %d\n", res);
+ printf("Report data:\n\t");
+ for (i = 0; i < res; i++)
+ printf("%hhx ", buf[i]);
+ puts("\n");
+ }
+
+ /* Send a Report to the Device */
+ buf[0] = 0x1; /* Report Number */
+ buf[1] = 0x77;
+ res = write(fd, buf, 2);
+ if (res < 0) {
+ printf("Error: %d\n", errno);
+ perror("write");
+ } else {
+ printf("write() wrote %d bytes\n", res);
+ }
+
+ /* Get a report from the device */
+ res = read(fd, buf, 16);
+ if (res < 0) {
+ perror("read");
+ } else {
+ printf("read() read %d bytes:\n\t", res);
+ for (i = 0; i < res; i++)
+ printf("%hhx ", buf[i]);
+ puts("\n");
+ }
+ close(fd);
+ return 0;
+}
+
+const char *
+bus_str(int bus)
+{
+	switch (bus) {
+	case BUS_USB:
+		return "USB";
+	case BUS_HIL:
+		return "HIL";
+	case BUS_BLUETOOTH:
+		return "Bluetooth";
+	case BUS_VIRTUAL:
+		return "Virtual";
+	default:
+		return "Other";
+	}
+}
diff --git a/samples/hw_breakpoint/Makefile b/samples/hw_breakpoint/Makefile
new file mode 100644
index 000000000..ef4b6fdd7
--- /dev/null
+++ b/samples/hw_breakpoint/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_HW_BREAKPOINT) += data_breakpoint.o
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
new file mode 100644
index 000000000..b99322f18
--- /dev/null
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * data_breakpoint.c - Sample HW Breakpoint file to watch kernel data address
+ *
+ * usage: insmod data_breakpoint.ko ksym=<ksym_name>
+ *
+ * This kernel module places a breakpoint on the <ksym_name> kernel
+ * variable using a hardware breakpoint register. The handler, which
+ * prints a backtrace, is invoked every time a write operation is
+ * performed on that variable.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ *
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ */
+#include <linux/module.h> /* Needed by all modules */
+#include <linux/kernel.h> /* Needed for KERN_INFO */
+#include <linux/init.h> /* Needed for the macros */
+#include <linux/kallsyms.h>
+
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+
+struct perf_event * __percpu *sample_hbp;
+
+static char ksym_name[KSYM_NAME_LEN] = "jiffies";
+module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
+MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
+ " write operations on the kernel symbol");
+
+static void sample_hbp_handler(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+	printk(KERN_INFO "%s value has changed\n", ksym_name);
+ dump_stack();
+ printk(KERN_INFO "Dump stack from sample_hbp_handler\n");
+}
+
+static int __init hw_break_module_init(void)
+{
+ int ret;
+ struct perf_event_attr attr;
+ void *addr = __symbol_get(ksym_name);
+
+ if (!addr)
+ return -ENXIO;
+
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = HW_BREAKPOINT_W;
+
+ sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
+ if (IS_ERR((void __force *)sample_hbp)) {
+ ret = PTR_ERR((void __force *)sample_hbp);
+ goto fail;
+ }
+
+ printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name);
+
+ return 0;
+
+fail:
+ printk(KERN_INFO "Breakpoint registration failed\n");
+
+ return ret;
+}
+
+static void __exit hw_break_module_exit(void)
+{
+ unregister_wide_hw_breakpoint(sample_hbp);
+#ifdef CONFIG_MODULE_UNLOAD
+ __symbol_put(ksym_name);
+#endif
+ printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name);
+}
+
+module_init(hw_break_module_init);
+module_exit(hw_break_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("K.Prasad");
+MODULE_DESCRIPTION("ksym breakpoint");
diff --git a/samples/kdb/Makefile b/samples/kdb/Makefile
new file mode 100644
index 000000000..947cb8522
--- /dev/null
+++ b/samples/kdb/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_KDB) += kdb_hello.o
diff --git a/samples/kdb/kdb_hello.c b/samples/kdb/kdb_hello.c
new file mode 100644
index 000000000..82736e5a5
--- /dev/null
+++ b/samples/kdb/kdb_hello.c
@@ -0,0 +1,58 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2010 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kdb.h>
+
+/*
+ * All kdb shell command callbacks receive argc and argv, where
+ * argv[0] is the command the end user typed
+ */
+static int kdb_hello_cmd(int argc, const char **argv)
+{
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+
+ if (argc)
+ kdb_printf("Hello %s.\n", argv[1]);
+ else
+ kdb_printf("Hello world!\n");
+
+ return 0;
+}
+
+static kdbtab_t hello_cmd = {
+ .name = "hello",
+ .func = kdb_hello_cmd,
+ .usage = "[string]",
+ .help = "Say Hello World or Hello [string]",
+};
+
+static int __init kdb_hello_cmd_init(void)
+{
+ /*
+ * Registration of a dynamically added kdb command is done with
+ * kdb_register().
+ */
+ kdb_register(&hello_cmd);
+ return 0;
+}
+
+static void __exit kdb_hello_cmd_exit(void)
+{
+ kdb_unregister(&hello_cmd);
+}
+
+module_init(kdb_hello_cmd_init);
+module_exit(kdb_hello_cmd_exit);
+
+MODULE_AUTHOR("WindRiver");
+MODULE_DESCRIPTION("KDB example to add a hello command");
+MODULE_LICENSE("GPL");
diff --git a/samples/kfifo/Makefile b/samples/kfifo/Makefile
new file mode 100644
index 000000000..0af5250ad
--- /dev/null
+++ b/samples/kfifo/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_KFIFO) += bytestream-example.o dma-example.o inttype-example.o record-example.o
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
new file mode 100644
index 000000000..642d0748c
--- /dev/null
+++ b/samples/kfifo/bytestream-example.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sample kfifo byte stream implementation
+ *
+ * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
+
+/*
+ * This module shows how to create a byte stream fifo.
+ */
+
+/* fifo size in elements (bytes) */
+#define FIFO_SIZE 32
+
+/* name of the proc entry */
+#define PROC_FIFO "bytestream-fifo"
+
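+/*
+ * Once loaded, the fifo can also be exercised from user space through
+ * the proc file, e.g. (a sketch, run as root):
+ *
+ *	echo "hello" > /proc/bytestream-fifo
+ *	cat /proc/bytestream-fifo
+ */
+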
+/* lock for procfs read access */
+static DEFINE_MUTEX(read_access);
+
+/* lock for procfs write access */
+static DEFINE_MUTEX(write_access);
+
+/*
+ * define DYNAMIC in this example for a dynamically allocated fifo.
+ *
+ * Otherwise the fifo storage will be a part of the fifo structure.
+ */
+#if 0
+#define DYNAMIC
+#endif
+
+#ifdef DYNAMIC
+static struct kfifo test;
+#else
+static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
+#endif
+
+static const unsigned char expected_result[FIFO_SIZE] = {
+ 3, 4, 5, 6, 7, 8, 9, 0,
+ 1, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static int __init testfunc(void)
+{
+ unsigned char buf[6];
+ unsigned char i, j;
+ unsigned int ret;
+
+ printk(KERN_INFO "byte stream fifo test start\n");
+
+ /* put string into the fifo */
+ kfifo_in(&test, "hello", 5);
+
+ /* put values into the fifo */
+ for (i = 0; i != 10; i++)
+ kfifo_put(&test, i);
+
+ /* show the number of used elements */
+ printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
+
+ /* get max of 5 bytes from the fifo */
+ i = kfifo_out(&test, buf, 5);
+ printk(KERN_INFO "buf: %.*s\n", i, buf);
+
+ /* get max of 2 elements from the fifo */
+ ret = kfifo_out(&test, buf, 2);
+ printk(KERN_INFO "ret: %d\n", ret);
+ /* and put it back to the end of the fifo */
+ ret = kfifo_in(&test, buf, ret);
+ printk(KERN_INFO "ret: %d\n", ret);
+
+ /* skip first element of the fifo */
+ printk(KERN_INFO "skip 1st element\n");
+ kfifo_skip(&test);
+
+	/* put values into the fifo until it is full */
+ for (i = 20; kfifo_put(&test, i); i++)
+ ;
+
+ printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
+
+ /* show the first value without removing from the fifo */
+ if (kfifo_peek(&test, &i))
+ printk(KERN_INFO "%d\n", i);
+
+ /* check the correctness of all values in the fifo */
+ j = 0;
+ while (kfifo_get(&test, &i)) {
+ printk(KERN_INFO "item = %d\n", i);
+ if (i != expected_result[j++]) {
+ printk(KERN_WARNING "value mismatch: test failed\n");
+ return -EIO;
+ }
+ }
+ if (j != ARRAY_SIZE(expected_result)) {
+ printk(KERN_WARNING "size mismatch: test failed\n");
+ return -EIO;
+ }
+ printk(KERN_INFO "test passed\n");
+
+ return 0;
+}
+
+static ssize_t fifo_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&write_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_from_user(&test, buf, count, &copied);
+
+ mutex_unlock(&write_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static ssize_t fifo_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&read_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_to_user(&test, buf, count, &copied);
+
+ mutex_unlock(&read_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static const struct proc_ops fifo_proc_ops = {
+ .proc_read = fifo_read,
+ .proc_write = fifo_write,
+ .proc_lseek = noop_llseek,
+};
+
+static int __init example_init(void)
+{
+#ifdef DYNAMIC
+ int ret;
+
+ ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
+ if (ret) {
+ printk(KERN_ERR "error kfifo_alloc\n");
+ return ret;
+ }
+#else
+ INIT_KFIFO(test);
+#endif
+ if (testfunc() < 0) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -EIO;
+ }
+
+ if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void __exit example_exit(void)
+{
+ remove_proc_entry(PROC_FIFO, NULL);
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c
new file mode 100644
index 000000000..0cf27483c
--- /dev/null
+++ b/samples/kfifo/dma-example.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sample fifo dma implementation
+ *
+ * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kfifo.h>
+
+/*
+ * This module shows how to handle fifo dma operations.
+ */
+
+/* fifo size in elements (bytes) */
+#define FIFO_SIZE 32
+
+static struct kfifo fifo;
+
+static int __init example_init(void)
+{
+ int i;
+ unsigned int ret;
+ unsigned int nents;
+ struct scatterlist sg[10];
+
+ printk(KERN_INFO "DMA fifo test start\n");
+
+ if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
+ printk(KERN_WARNING "error kfifo_alloc\n");
+ return -ENOMEM;
+ }
+
+ printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
+
+ kfifo_in(&fifo, "test", 4);
+
+ for (i = 0; i != 9; i++)
+ kfifo_put(&fifo, i);
+
+ /* kick away first byte */
+ kfifo_skip(&fifo);
+
+ printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
+
+ /*
+ * Configure the kfifo buffer to receive data from DMA input.
+ *
+ * .--------------------------------------.
+ * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
+ * |---|------------------|---------------|
+ * \_/ \________________/ \_____________/
+ * \ \ \
+ * \ \_allocated data \
+ * \_*free space* \_*free space*
+ *
+ * We need two different SG entries: one for the free space area at the
+ * end of the kfifo buffer (19 bytes) and another for the first free
+ * byte at the beginning, after the kfifo_skip().
+ */
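+	/*
+	 * Arithmetic check for the picture above: 4 + 9 = 13 bytes were
+	 * queued and one was skipped, so 12 of the 32 bytes are in use and
+	 * 20 are free: 19 contiguous bytes at the end plus the one freed
+	 * byte at the front.
+	 */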
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
+ printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+ if (!nents) {
+ /* fifo is full and no sgl was created */
+ printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
+ return -EIO;
+ }
+
+ /* receive data */
+ printk(KERN_INFO "scatterlist for receive:\n");
+ for (i = 0; i < nents; i++) {
+ printk(KERN_INFO
+ "sg[%d] -> "
+ "page %p offset 0x%.8x length 0x%.8x\n",
+ i, sg_page(&sg[i]), sg[i].offset, sg[i].length);
+
+ if (sg_is_last(&sg[i]))
+ break;
+ }
+
+	/* put your code to set up and execute the DMA operation here */
+ /* ... */
+
+ /* example: zero bytes received */
+ ret = 0;
+
+ /* finish the dma operation and update the received data */
+ kfifo_dma_in_finish(&fifo, ret);
+
+ /* Prepare to transmit data, example: 8 bytes */
+ nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
+ printk(KERN_INFO "DMA sgl entries: %d\n", nents);
+ if (!nents) {
+ /* no data was available and no sgl was created */
+ printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
+ return -EIO;
+ }
+
+ printk(KERN_INFO "scatterlist for transmit:\n");
+ for (i = 0; i < nents; i++) {
+ printk(KERN_INFO
+ "sg[%d] -> "
+ "page %p offset 0x%.8x length 0x%.8x\n",
+ i, sg_page(&sg[i]), sg[i].offset, sg[i].length);
+
+ if (sg_is_last(&sg[i]))
+ break;
+ }
+
+	/* put your code to set up and execute the DMA operation here */
+ /* ... */
+
+ /* example: 5 bytes transmitted */
+ ret = 5;
+
+ /* finish the dma operation and update the transmitted data */
+ kfifo_dma_out_finish(&fifo, ret);
+
+ ret = kfifo_len(&fifo);
+ printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
+
+ if (ret != 7) {
+		printk(KERN_WARNING "size mismatch: test failed\n");
+ return -EIO;
+ }
+ printk(KERN_INFO "test passed\n");
+
+ return 0;
+}
+
+static void __exit example_exit(void)
+{
+ kfifo_free(&fifo);
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
new file mode 100644
index 000000000..c61482ba9
--- /dev/null
+++ b/samples/kfifo/inttype-example.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sample kfifo int type implementation
+ *
+ * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
+
+/*
+ * This module shows how to create an int type fifo.
+ */
+
+/* fifo size in elements (ints) */
+#define FIFO_SIZE 32
+
+/* name of the proc entry */
+#define PROC_FIFO "int-fifo"
+
+/* lock for procfs read access */
+static DEFINE_MUTEX(read_access);
+
+/* lock for procfs write access */
+static DEFINE_MUTEX(write_access);
+
+/*
+ * define DYNAMIC in this example for a dynamically allocated fifo.
+ *
+ * Otherwise the fifo storage will be a part of the fifo structure.
+ */
+#if 0
+#define DYNAMIC
+#endif
+
+#ifdef DYNAMIC
+static DECLARE_KFIFO_PTR(test, int);
+#else
+static DEFINE_KFIFO(test, int, FIFO_SIZE);
+#endif
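+
+/*
+ * Note the difference from the byte stream example: DECLARE_KFIFO_PTR()
+ * declares a fifo whose storage is allocated later with kfifo_alloc(),
+ * while DEFINE_KFIFO() both declares and initializes a fifo with embedded
+ * storage, which is why no INIT_KFIFO() call is needed below.
+ */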
+
+static const int expected_result[FIFO_SIZE] = {
+ 3, 4, 5, 6, 7, 8, 9, 0,
+ 1, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static int __init testfunc(void)
+{
+ int buf[6];
+ int i, j;
+ unsigned int ret;
+
+ printk(KERN_INFO "int fifo test start\n");
+
+ /* put values into the fifo */
+ for (i = 0; i != 10; i++)
+ kfifo_put(&test, i);
+
+ /* show the number of used elements */
+ printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
+
+ /* get max of 2 elements from the fifo */
+ ret = kfifo_out(&test, buf, 2);
+ printk(KERN_INFO "ret: %d\n", ret);
+ /* and put it back to the end of the fifo */
+ ret = kfifo_in(&test, buf, ret);
+ printk(KERN_INFO "ret: %d\n", ret);
+
+ /* skip first element of the fifo */
+ printk(KERN_INFO "skip 1st element\n");
+ kfifo_skip(&test);
+
+	/* put values into the fifo until it is full */
+ for (i = 20; kfifo_put(&test, i); i++)
+ ;
+
+ printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));
+
+ /* show the first value without removing from the fifo */
+ if (kfifo_peek(&test, &i))
+ printk(KERN_INFO "%d\n", i);
+
+ /* check the correctness of all values in the fifo */
+ j = 0;
+ while (kfifo_get(&test, &i)) {
+ printk(KERN_INFO "item = %d\n", i);
+ if (i != expected_result[j++]) {
+ printk(KERN_WARNING "value mismatch: test failed\n");
+ return -EIO;
+ }
+ }
+ if (j != ARRAY_SIZE(expected_result)) {
+ printk(KERN_WARNING "size mismatch: test failed\n");
+ return -EIO;
+ }
+ printk(KERN_INFO "test passed\n");
+
+ return 0;
+}
+
+static ssize_t fifo_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&write_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_from_user(&test, buf, count, &copied);
+
+ mutex_unlock(&write_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static ssize_t fifo_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&read_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_to_user(&test, buf, count, &copied);
+
+ mutex_unlock(&read_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static const struct proc_ops fifo_proc_ops = {
+ .proc_read = fifo_read,
+ .proc_write = fifo_write,
+ .proc_lseek = noop_llseek,
+};
+
+static int __init example_init(void)
+{
+#ifdef DYNAMIC
+ int ret;
+
+ ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
+ if (ret) {
+ printk(KERN_ERR "error kfifo_alloc\n");
+ return ret;
+ }
+#endif
+ if (testfunc() < 0) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -EIO;
+ }
+
+ if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void __exit example_exit(void)
+{
+ remove_proc_entry(PROC_FIFO, NULL);
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
new file mode 100644
index 000000000..e4087b2d3
--- /dev/null
+++ b/samples/kfifo/record-example.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sample dynamically sized record fifo implementation
+ *
+ * Copyright (C) 2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
+
+/*
+ * This module shows how to create a variable-sized record fifo.
+ */
+
+/* fifo size in elements (bytes) */
+#define FIFO_SIZE 128
+
+/* name of the proc entry */
+#define PROC_FIFO "record-fifo"
+
+/* lock for procfs read access */
+static DEFINE_MUTEX(read_access);
+
+/* lock for procfs write access */
+static DEFINE_MUTEX(write_access);
+
+/*
+ * define DYNAMIC in this example for a dynamically allocated fifo.
+ *
+ * Otherwise the fifo storage will be a part of the fifo structure.
+ */
+#if 0
+#define DYNAMIC
+#endif
+
+/*
+ * struct kfifo_rec_ptr_1 and STRUCT_KFIFO_REC_1 can handle records of a
+ * length between 0 and 255 bytes.
+ *
+ * struct kfifo_rec_ptr_2 and STRUCT_KFIFO_REC_2 can handle records of a
+ * length between 0 and 65535 bytes.
+ */
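+
+/*
+ * Internally each record is stored with a small length header in front
+ * of its payload, e.g. for a REC_1 fifo (1-byte header):
+ *
+ *	| len | payload ... | len | payload ... |
+ *
+ * This is why kfifo_peek_len() below can report the size of the next
+ * record without consuming it.
+ */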
+
+#ifdef DYNAMIC
+struct kfifo_rec_ptr_1 test;
+
+#else
+typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest;
+
+static mytest test;
+#endif
+
+static const char *expected_result[] = {
+ "a",
+ "bb",
+ "ccc",
+ "dddd",
+ "eeeee",
+ "ffffff",
+ "ggggggg",
+ "hhhhhhhh",
+ "iiiiiiiii",
+ "jjjjjjjjjj",
+};
+
+static int __init testfunc(void)
+{
+ char buf[100];
+ unsigned int i;
+ unsigned int ret;
+ struct { unsigned char buf[6]; } hello = { "hello" };
+
+ printk(KERN_INFO "record fifo test start\n");
+
+ kfifo_in(&test, &hello, sizeof(hello));
+
+ /* show the size of the next record in the fifo */
+ printk(KERN_INFO "fifo peek len: %u\n", kfifo_peek_len(&test));
+
+ /* put in variable length data */
+ for (i = 0; i < 10; i++) {
+ memset(buf, 'a' + i, i + 1);
+ kfifo_in(&test, buf, i + 1);
+ }
+
+ /* skip first element of the fifo */
+ printk(KERN_INFO "skip 1st element\n");
+ kfifo_skip(&test);
+
+ printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));
+
+ /* show the first record without removing from the fifo */
+ ret = kfifo_out_peek(&test, buf, sizeof(buf));
+ if (ret)
+ printk(KERN_INFO "%.*s\n", ret, buf);
+
+ /* check the correctness of all values in the fifo */
+ i = 0;
+ while (!kfifo_is_empty(&test)) {
+ ret = kfifo_out(&test, buf, sizeof(buf));
+ buf[ret] = '\0';
+ printk(KERN_INFO "item = %.*s\n", ret, buf);
+ if (strcmp(buf, expected_result[i++])) {
+ printk(KERN_WARNING "value mismatch: test failed\n");
+ return -EIO;
+ }
+ }
+ if (i != ARRAY_SIZE(expected_result)) {
+ printk(KERN_WARNING "size mismatch: test failed\n");
+ return -EIO;
+ }
+ printk(KERN_INFO "test passed\n");
+
+ return 0;
+}
+
+static ssize_t fifo_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&write_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_from_user(&test, buf, count, &copied);
+
+ mutex_unlock(&write_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static ssize_t fifo_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned int copied;
+
+ if (mutex_lock_interruptible(&read_access))
+ return -ERESTARTSYS;
+
+ ret = kfifo_to_user(&test, buf, count, &copied);
+
+ mutex_unlock(&read_access);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static const struct proc_ops fifo_proc_ops = {
+ .proc_read = fifo_read,
+ .proc_write = fifo_write,
+ .proc_lseek = noop_llseek,
+};
+
+static int __init example_init(void)
+{
+#ifdef DYNAMIC
+ int ret;
+
+ ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
+ if (ret) {
+ printk(KERN_ERR "error kfifo_alloc\n");
+ return ret;
+ }
+#else
+ INIT_KFIFO(test);
+#endif
+ if (testfunc() < 0) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -EIO;
+ }
+
+ if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void __exit example_exit(void)
+{
+ remove_proc_entry(PROC_FIFO, NULL);
+#ifdef DYNAMIC
+ kfifo_free(&test);
+#endif
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
diff --git a/samples/kmemleak/Makefile b/samples/kmemleak/Makefile
new file mode 100644
index 000000000..16b6132c5
--- /dev/null
+++ b/samples/kmemleak/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
diff --git a/samples/kmemleak/kmemleak-test.c b/samples/kmemleak/kmemleak-test.c
new file mode 100644
index 000000000..7b476eb82
--- /dev/null
+++ b/samples/kmemleak/kmemleak-test.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * samples/kmemleak/kmemleak-test.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#define pr_fmt(fmt) "kmemleak: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+#include <linux/fdtable.h>
+
+#include <linux/kmemleak.h>
+
+struct test_node {
+ long header[25];
+ struct list_head list;
+ long footer[25];
+};
+
+static LIST_HEAD(test_list);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
+
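+/*
+ * After loading this module, the reported leaks can be inspected via
+ * debugfs, assuming CONFIG_DEBUG_KMEMLEAK=y and debugfs is mounted
+ * (run as root):
+ *
+ *	echo scan > /sys/kernel/debug/kmemleak
+ *	cat /sys/kernel/debug/kmemleak
+ */
+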
+/*
+ * Some very simple testing. This function needs to be extended for
+ * proper testing.
+ */
+static int __init kmemleak_test_init(void)
+{
+ struct test_node *elem;
+ int i;
+
+ pr_info("Kmemleak testing\n");
+
+ /* make some orphan objects */
+ pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
+ pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
+ pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
+ pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
+ pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
+ pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
+ pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
+ pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
+#ifndef CONFIG_MODULES
+ pr_info("kmem_cache_alloc(files_cachep) = %p\n",
+ kmem_cache_alloc(files_cachep, GFP_KERNEL));
+ pr_info("kmem_cache_alloc(files_cachep) = %p\n",
+ kmem_cache_alloc(files_cachep, GFP_KERNEL));
+#endif
+ pr_info("vmalloc(64) = %p\n", vmalloc(64));
+ pr_info("vmalloc(64) = %p\n", vmalloc(64));
+ pr_info("vmalloc(64) = %p\n", vmalloc(64));
+ pr_info("vmalloc(64) = %p\n", vmalloc(64));
+ pr_info("vmalloc(64) = %p\n", vmalloc(64));
+
+ /*
+ * Add elements to a list. They should only appear as orphan
+ * after the module is removed.
+ */
+ for (i = 0; i < 10; i++) {
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ pr_info("kzalloc(sizeof(*elem)) = %p\n", elem);
+ if (!elem)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&elem->list);
+ list_add_tail(&elem->list, &test_list);
+ }
+
+ for_each_possible_cpu(i) {
+ per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
+ pr_info("kmalloc(129) = %p\n",
+ per_cpu(kmemleak_test_pointer, i));
+ }
+
+ return 0;
+}
+module_init(kmemleak_test_init);
+
+static void __exit kmemleak_test_exit(void)
+{
+ struct test_node *elem, *tmp;
+
+ /*
+ * Remove the list elements without actually freeing the
+ * memory.
+ */
+ list_for_each_entry_safe(elem, tmp, &test_list, list)
+ list_del(&elem->list);
+}
+module_exit(kmemleak_test_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/samples/kobject/Makefile b/samples/kobject/Makefile
new file mode 100644
index 000000000..bb5d21997
--- /dev/null
+++ b/samples/kobject/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_KOBJECT) += kobject-example.o kset-example.o
diff --git a/samples/kobject/kobject-example.c b/samples/kobject/kobject-example.c
new file mode 100644
index 000000000..96678ed73
--- /dev/null
+++ b/samples/kobject/kobject-example.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sample kobject implementation
+ *
+ * Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2007 Novell Inc.
+ */
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+/*
+ * This module shows how to create a simple subdirectory in sysfs called
+ * /sys/kernel/kobject_example. In that directory, 3 files are created:
+ * "foo", "baz", and "bar". If an integer is written to one of these
+ * files, it can be read back later.
+ */
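+
+/*
+ * Example interaction from user space once the module is loaded
+ * (a sketch; the value is arbitrary, run as root):
+ *
+ *	echo 42 > /sys/kernel/kobject_example/foo
+ *	cat /sys/kernel/kobject_example/foo
+ */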
+
+static int foo;
+static int baz;
+static int bar;
+
+/*
+ * The "foo" file where a static variable is read from and written to.
+ */
+static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", foo);
+}
+
+static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = kstrtoint(buf, 10, &foo);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+/* Sysfs attributes cannot be world-writable. */
+static struct kobj_attribute foo_attribute =
+ __ATTR(foo, 0664, foo_show, foo_store);
+
+/*
+ * More complex function where we determine which variable is being accessed by
+ * looking at the attribute for the "baz" and "bar" files.
+ */
+static ssize_t b_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int var;
+
+ if (strcmp(attr->attr.name, "baz") == 0)
+ var = baz;
+ else
+ var = bar;
+ return sysfs_emit(buf, "%d\n", var);
+}
+
+static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int var, ret;
+
+ ret = kstrtoint(buf, 10, &var);
+ if (ret < 0)
+ return ret;
+
+ if (strcmp(attr->attr.name, "baz") == 0)
+ baz = var;
+ else
+ bar = var;
+ return count;
+}
+
+static struct kobj_attribute baz_attribute =
+ __ATTR(baz, 0664, b_show, b_store);
+static struct kobj_attribute bar_attribute =
+ __ATTR(bar, 0664, b_show, b_store);
+
+
+/*
+ * Create a group of attributes so that we can create and destroy them all
+ * at once.
+ */
+static struct attribute *attrs[] = {
+ &foo_attribute.attr,
+ &baz_attribute.attr,
+ &bar_attribute.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
+/*
+ * An unnamed attribute group will put all of the attributes directly in
+ * the kobject directory. If we specify a name, a subdirectory will be
+ * created for the attributes with the directory being the name of the
+ * attribute group.
+ */
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *example_kobj;
+
+static int __init example_init(void)
+{
+ int retval;
+
+ /*
+ * Create a simple kobject with the name of "kobject_example",
+ * located under /sys/kernel/
+ *
+ * As this is a simple directory, no uevent will be sent to
+ * userspace. That is why this function should not be used for
+ * any type of dynamic kobjects, where the name and number are
+ * not known ahead of time.
+ */
+ example_kobj = kobject_create_and_add("kobject_example", kernel_kobj);
+ if (!example_kobj)
+ return -ENOMEM;
+
+ /* Create the files associated with this kobject */
+ retval = sysfs_create_group(example_kobj, &attr_group);
+ if (retval)
+ kobject_put(example_kobj);
+
+ return retval;
+}
+
+static void __exit example_exit(void)
+{
+ kobject_put(example_kobj);
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <greg@kroah.com>");
diff --git a/samples/kobject/kset-example.c b/samples/kobject/kset-example.c
new file mode 100644
index 000000000..52f1acabd
--- /dev/null
+++ b/samples/kobject/kset-example.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sample kset and ktype implementation
+ *
+ * Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2007 Novell Inc.
+ */
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+/*
+ * This module shows how to create a kset in sysfs called
+ * /sys/kernel/kset_example
+ * Then three kobjects are created and assigned to this kset: "foo",
+ * "baz", and "bar". In those kobjects, attributes of the same names are
+ * also created and if an integer is written to these files, it can be
+ * read back later.
+ */
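+
+/*
+ * The resulting sysfs layout is roughly:
+ *
+ *	/sys/kernel/kset_example/foo/{foo,baz,bar}
+ *	/sys/kernel/kset_example/bar/{foo,baz,bar}
+ *	/sys/kernel/kset_example/baz/{foo,baz,bar}
+ */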
+
+
+/*
+ * This is our "object" that we will create a few of and register them with
+ * sysfs.
+ */
+struct foo_obj {
+ struct kobject kobj;
+ int foo;
+ int baz;
+ int bar;
+};
+#define to_foo_obj(x) container_of(x, struct foo_obj, kobj)
+
+/* a custom attribute that works just for a struct foo_obj. */
+struct foo_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct foo_obj *foo, struct foo_attribute *attr, char *buf);
+ ssize_t (*store)(struct foo_obj *foo, struct foo_attribute *attr, const char *buf, size_t count);
+};
+#define to_foo_attr(x) container_of(x, struct foo_attribute, attr)
+
+/*
+ * The default show function that must be passed to sysfs. This will be
+ * called by sysfs whenever a show function is called by the user on a
+ * sysfs file associated with the kobjects we have registered. We need to
+ * transpose back from a "default" kobject to our custom struct foo_obj and
+ * then call the show function for that specific object.
+ */
+static ssize_t foo_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct foo_attribute *attribute;
+ struct foo_obj *foo;
+
+ attribute = to_foo_attr(attr);
+ foo = to_foo_obj(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(foo, attribute, buf);
+}
+
+/*
+ * Just like the default show function above, but this one is for when the
+ * sysfs "store" is requested (when a value is written to a file.)
+ */
+static ssize_t foo_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct foo_attribute *attribute;
+ struct foo_obj *foo;
+
+ attribute = to_foo_attr(attr);
+ foo = to_foo_obj(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(foo, attribute, buf, len);
+}
+
+/* Our custom sysfs_ops that we will associate with our ktype later on */
+static const struct sysfs_ops foo_sysfs_ops = {
+ .show = foo_attr_show,
+ .store = foo_attr_store,
+};
+
+/*
+ * The release function for our object. This is REQUIRED by the kernel to
+ * have. We free the memory held in our object here.
+ *
+ * NEVER try to get away with just a "blank" release function to try to be
+ * smarter than the kernel. Turns out, no one ever is...
+ */
+static void foo_release(struct kobject *kobj)
+{
+ struct foo_obj *foo;
+
+ foo = to_foo_obj(kobj);
+ kfree(foo);
+}
+
+/*
+ * The "foo" file where the .foo variable is read from and written to.
+ */
+static ssize_t foo_show(struct foo_obj *foo_obj, struct foo_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", foo_obj->foo);
+}
+
+static ssize_t foo_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = kstrtoint(buf, 10, &foo_obj->foo);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+/* Sysfs attributes cannot be world-writable. */
+static struct foo_attribute foo_attribute =
+ __ATTR(foo, 0664, foo_show, foo_store);
+
+/*
+ * More complex function where we determine which variable is being accessed by
+ * looking at the attribute for the "baz" and "bar" files.
+ */
+static ssize_t b_show(struct foo_obj *foo_obj, struct foo_attribute *attr,
+ char *buf)
+{
+ int var;
+
+ if (strcmp(attr->attr.name, "baz") == 0)
+ var = foo_obj->baz;
+ else
+ var = foo_obj->bar;
+ return sysfs_emit(buf, "%d\n", var);
+}
+
+static ssize_t b_store(struct foo_obj *foo_obj, struct foo_attribute *attr,
+ const char *buf, size_t count)
+{
+ int var, ret;
+
+ ret = kstrtoint(buf, 10, &var);
+ if (ret < 0)
+ return ret;
+
+ if (strcmp(attr->attr.name, "baz") == 0)
+ foo_obj->baz = var;
+ else
+ foo_obj->bar = var;
+ return count;
+}
+
+static struct foo_attribute baz_attribute =
+ __ATTR(baz, 0664, b_show, b_store);
+static struct foo_attribute bar_attribute =
+ __ATTR(bar, 0664, b_show, b_store);
+
+/*
+ * Create a group of attributes so that we can create and destroy them all
+ * at once.
+ */
+static struct attribute *foo_default_attrs[] = {
+ &foo_attribute.attr,
+ &baz_attribute.attr,
+ &bar_attribute.attr,
+ NULL, /* need to NULL terminate the list of attributes */
+};
+ATTRIBUTE_GROUPS(foo_default);
+
+/*
+ * Our own ktype for our kobjects. Here we specify our sysfs ops, the
+ * release function, and the set of default attributes we want created
+ * whenever a kobject of this type is registered with the kernel.
+ */
+static struct kobj_type foo_ktype = {
+ .sysfs_ops = &foo_sysfs_ops,
+ .release = foo_release,
+ .default_groups = foo_default_groups,
+};
+
+static struct kset *example_kset;
+static struct foo_obj *foo_obj;
+static struct foo_obj *bar_obj;
+static struct foo_obj *baz_obj;
+
+static struct foo_obj *create_foo_obj(const char *name)
+{
+ struct foo_obj *foo;
+ int retval;
+
+ /* allocate the memory for the whole object */
+ foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ if (!foo)
+ return NULL;
+
+ /*
+ * As we have a kset for this kobject, we need to set it before calling
+ * the kobject core.
+ */
+ foo->kobj.kset = example_kset;
+
+	/*
+	 * Initialize and add the kobject to the kernel. All the default files
+	 * will be created here. As we have already specified a kset for this
+	 * kobject, we don't have to set a parent for it; the kobject will be
+	 * placed beneath that kset automatically.
+	 */
+ retval = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "%s", name);
+ if (retval) {
+ kobject_put(&foo->kobj);
+ return NULL;
+ }
+
+ /*
+ * We are always responsible for sending the uevent that the kobject
+ * was added to the system.
+ */
+ kobject_uevent(&foo->kobj, KOBJ_ADD);
+
+ return foo;
+}
+
+static void destroy_foo_obj(struct foo_obj *foo)
+{
+ kobject_put(&foo->kobj);
+}
+
+static int __init example_init(void)
+{
+ /*
+ * Create a kset with the name of "kset_example",
+ * located under /sys/kernel/
+ */
+ example_kset = kset_create_and_add("kset_example", NULL, kernel_kobj);
+ if (!example_kset)
+ return -ENOMEM;
+
+ /*
+ * Create three objects and register them with our kset
+ */
+ foo_obj = create_foo_obj("foo");
+ if (!foo_obj)
+ goto foo_error;
+
+ bar_obj = create_foo_obj("bar");
+ if (!bar_obj)
+ goto bar_error;
+
+ baz_obj = create_foo_obj("baz");
+ if (!baz_obj)
+ goto baz_error;
+
+ return 0;
+
+baz_error:
+ destroy_foo_obj(bar_obj);
+bar_error:
+ destroy_foo_obj(foo_obj);
+foo_error:
+ kset_unregister(example_kset);
+ return -EINVAL;
+}
+
+static void __exit example_exit(void)
+{
+ destroy_foo_obj(baz_obj);
+ destroy_foo_obj(bar_obj);
+ destroy_foo_obj(foo_obj);
+ kset_unregister(example_kset);
+}
+
+module_init(example_init);
+module_exit(example_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <greg@kroah.com>");
diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile
new file mode 100644
index 000000000..e77459271
--- /dev/null
+++ b/samples/kprobes/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# builds the kprobes example kernel modules;
+# then to use one (as root): insmod <module_name.ko>
+
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o
+obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
new file mode 100644
index 000000000..fd346f58d
--- /dev/null
+++ b/samples/kprobes/kprobe_example.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Here's a sample kernel module showing the use of kprobes to dump a
+ * stack trace and selected registers when kernel_clone() is called.
+ *
+ * For more information on theory of operation of kprobes, see
+ * Documentation/trace/kprobes.rst
+ *
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever kernel_clone() is invoked to create a new process.
+ */
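+
+/*
+ * Example (the probed symbol is a module parameter; any kprobe-able
+ * function symbol works, do_sys_open is only an illustration):
+ *
+ *	insmod kprobe_example.ko symbol=do_sys_open
+ */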
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+
+static char symbol[KSYM_NAME_LEN] = "kernel_clone";
+module_param_string(symbol, symbol, KSYM_NAME_LEN, 0644);
+
+/* For each probe you need to allocate a kprobe structure */
+static struct kprobe kp = {
+ .symbol_name = symbol,
+};
+
+/* kprobe pre_handler: called just before the probed instruction is executed */
+static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+#ifdef CONFIG_X86
+ pr_info("<%s> p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->ip, regs->flags);
+#endif
+#ifdef CONFIG_PPC
+ pr_info("<%s> p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
+ p->symbol_name, p->addr, regs->nip, regs->msr);
+#endif
+#ifdef CONFIG_MIPS
+ pr_info("<%s> p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
+ p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
+#endif
+#ifdef CONFIG_ARM64
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, pstate = 0x%lx\n",
+ p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
+#endif
+#ifdef CONFIG_ARM
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, cpsr = 0x%lx\n",
+ p->symbol_name, p->addr, (long)regs->ARM_pc, (long)regs->ARM_cpsr);
+#endif
+#ifdef CONFIG_RISCV
+ pr_info("<%s> p->addr = 0x%p, pc = 0x%lx, status = 0x%lx\n",
+ p->symbol_name, p->addr, regs->epc, regs->status);
+#endif
+#ifdef CONFIG_S390
+	pr_info("<%s> p->addr = 0x%p, ip = 0x%lx, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->psw.addr, regs->flags);
+#endif
+
+ /* A dump_stack() here will give a stack backtrace */
+ return 0;
+}
+
+/* kprobe post_handler: called after the probed instruction is executed */
+static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+#ifdef CONFIG_X86
+ pr_info("<%s> p->addr = 0x%p, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->flags);
+#endif
+#ifdef CONFIG_PPC
+ pr_info("<%s> p->addr = 0x%p, msr = 0x%lx\n",
+ p->symbol_name, p->addr, regs->msr);
+#endif
+#ifdef CONFIG_MIPS
+ pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
+ p->symbol_name, p->addr, regs->cp0_status);
+#endif
+#ifdef CONFIG_ARM64
+ pr_info("<%s> p->addr = 0x%p, pstate = 0x%lx\n",
+ p->symbol_name, p->addr, (long)regs->pstate);
+#endif
+#ifdef CONFIG_ARM
+ pr_info("<%s> p->addr = 0x%p, cpsr = 0x%lx\n",
+ p->symbol_name, p->addr, (long)regs->ARM_cpsr);
+#endif
+#ifdef CONFIG_RISCV
+ pr_info("<%s> p->addr = 0x%p, status = 0x%lx\n",
+ p->symbol_name, p->addr, regs->status);
+#endif
+#ifdef CONFIG_S390
+	pr_info("<%s> p->addr = 0x%p, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->flags);
+#endif
+}
+
+static int __init kprobe_init(void)
+{
+	int ret;
+
+	kp.pre_handler = handler_pre;
+ kp.post_handler = handler_post;
+
+ ret = register_kprobe(&kp);
+ if (ret < 0) {
+ pr_err("register_kprobe failed, returned %d\n", ret);
+ return ret;
+ }
+ pr_info("Planted kprobe at %p\n", kp.addr);
+ return 0;
+}
+
+static void __exit kprobe_exit(void)
+{
+ unregister_kprobe(&kp);
+ pr_info("kprobe at %p unregistered\n", kp.addr);
+}
+
+module_init(kprobe_init)
+module_exit(kprobe_exit)
+MODULE_LICENSE("GPL");
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
new file mode 100644
index 000000000..cbf16542d
--- /dev/null
+++ b/samples/kprobes/kretprobe_example.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kretprobe_example.c
+ *
+ * Here's a sample kernel module showing the use of return probes to
+ * report the return value and total time taken for probed function
+ * to run.
+ *
+ * usage: insmod kretprobe_example.ko func=<func_name>
+ *
+ * If no func_name is specified, kernel_clone is instrumented
+ *
+ * For more information on theory of operation of kretprobes, see
+ * Documentation/trace/kprobes.rst
+ *
+ * Build and insert the kernel module as done in the kprobe example.
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever the probed function returns. (Some messages may be suppressed
+ * if syslogd is configured to eliminate duplicate messages.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
+static char func_name[KSYM_NAME_LEN] = "kernel_clone";
+module_param_string(func, func_name, KSYM_NAME_LEN, 0644);
+MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
+ " function's execution time");
+
+/* per-instance private data */
+struct my_data {
+ ktime_t entry_stamp;
+};
+
+/* Here we use the entry_handler to timestamp function entry */
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ struct my_data *data;
+
+ if (!current->mm)
+ return 1; /* Skip kernel threads */
+
+ data = (struct my_data *)ri->data;
+ data->entry_stamp = ktime_get();
+ return 0;
+}
+NOKPROBE_SYMBOL(entry_handler);
+
+/*
+ * Return-probe handler: Log the return value and duration. Duration may turn
+ * out to be zero consistently, depending upon the granularity of time
+ * accounting on the platform.
+ */
+static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ struct my_data *data = (struct my_data *)ri->data;
+ s64 delta;
+ ktime_t now;
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
+ pr_info("%s returned %lu and took %lld ns to execute\n",
+ func_name, retval, (long long)delta);
+ return 0;
+}
+NOKPROBE_SYMBOL(ret_handler);
+
+static struct kretprobe my_kretprobe = {
+ .handler = ret_handler,
+ .entry_handler = entry_handler,
+ .data_size = sizeof(struct my_data),
+ /* Probe up to 20 instances concurrently. */
+ .maxactive = 20,
+};
+
+static int __init kretprobe_init(void)
+{
+ int ret;
+
+ my_kretprobe.kp.symbol_name = func_name;
+ ret = register_kretprobe(&my_kretprobe);
+ if (ret < 0) {
+ pr_err("register_kretprobe failed, returned %d\n", ret);
+ return ret;
+ }
+ pr_info("Planted return probe at %s: %p\n",
+ my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
+ return 0;
+}
+
+static void __exit kretprobe_exit(void)
+{
+ unregister_kretprobe(&my_kretprobe);
+ pr_info("kretprobe at %p unregistered\n", my_kretprobe.kp.addr);
+
+ /* nmissed > 0 suggests that maxactive was set too low. */
+ pr_info("Missed probing %d instances of %s\n",
+ my_kretprobe.nmissed, my_kretprobe.kp.symbol_name);
+}
+
+module_init(kretprobe_init)
+module_exit(kretprobe_exit)
+MODULE_LICENSE("GPL");
diff --git a/samples/landlock/.gitignore b/samples/landlock/.gitignore
new file mode 100644
index 000000000..f43668b2d
--- /dev/null
+++ b/samples/landlock/.gitignore
@@ -0,0 +1 @@
+/sandboxer
diff --git a/samples/landlock/Makefile b/samples/landlock/Makefile
new file mode 100644
index 000000000..5d601e51c
--- /dev/null
+++ b/samples/landlock/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+userprogs-always-y := sandboxer
+
+userccflags += -I usr/include
+
+.PHONY: all clean
+
+all:
+ $(MAKE) -C ../.. samples/landlock/
+
+clean:
+ $(MAKE) -C ../.. M=samples/landlock/ clean
diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
new file mode 100644
index 000000000..f29bb3c72
--- /dev/null
+++ b/samples/landlock/sandboxer.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Simple Landlock sandbox manager able to launch a process restricted by a
+ * user-defined filesystem access control policy.
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2020 ANSSI
+ */
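+
+/*
+ * Example invocation (paths are illustrative; see the usage text in
+ * main() for a fuller example):
+ *
+ *	LL_FS_RO="/bin:/lib:/usr" LL_FS_RW="/tmp" ./sandboxer bash -i
+ */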
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <linux/prctl.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifndef landlock_create_ruleset
+static inline int
+landlock_create_ruleset(const struct landlock_ruleset_attr *const attr,
+ const size_t size, const __u32 flags)
+{
+ return syscall(__NR_landlock_create_ruleset, attr, size, flags);
+}
+#endif
+
+#ifndef landlock_add_rule
+static inline int landlock_add_rule(const int ruleset_fd,
+ const enum landlock_rule_type rule_type,
+ const void *const rule_attr,
+ const __u32 flags)
+{
+ return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type, rule_attr,
+ flags);
+}
+#endif
+
+#ifndef landlock_restrict_self
+static inline int landlock_restrict_self(const int ruleset_fd,
+ const __u32 flags)
+{
+ return syscall(__NR_landlock_restrict_self, ruleset_fd, flags);
+}
+#endif
+
+#define ENV_FS_RO_NAME "LL_FS_RO"
+#define ENV_FS_RW_NAME "LL_FS_RW"
+#define ENV_PATH_TOKEN ":"
+
+static int parse_path(char *env_path, const char ***const path_list)
+{
+ int i, num_paths = 0;
+
+ if (env_path) {
+ num_paths++;
+ for (i = 0; env_path[i]; i++) {
+ if (env_path[i] == ENV_PATH_TOKEN[0])
+ num_paths++;
+ }
+ }
+ *path_list = malloc(num_paths * sizeof(**path_list));
+ for (i = 0; i < num_paths; i++)
+ (*path_list)[i] = strsep(&env_path, ENV_PATH_TOKEN);
+
+ return num_paths;
+}
+
+/* clang-format off */
+
+#define ACCESS_FILE ( \
+ LANDLOCK_ACCESS_FS_EXECUTE | \
+ LANDLOCK_ACCESS_FS_WRITE_FILE | \
+ LANDLOCK_ACCESS_FS_READ_FILE)
+
+/* clang-format on */
+
+static int populate_ruleset(const char *const env_var, const int ruleset_fd,
+ const __u64 allowed_access)
+{
+ int num_paths, i, ret = 1;
+ char *env_path_name;
+ const char **path_list = NULL;
+ struct landlock_path_beneath_attr path_beneath = {
+ .parent_fd = -1,
+ };
+
+ env_path_name = getenv(env_var);
+ if (!env_path_name) {
+		/* Prevents users from forgetting a setting. */
+ fprintf(stderr, "Missing environment variable %s\n", env_var);
+ return 1;
+ }
+ env_path_name = strdup(env_path_name);
+ unsetenv(env_var);
+ num_paths = parse_path(env_path_name, &path_list);
+ if (num_paths == 1 && path_list[0][0] == '\0') {
+		/*
+		 * Allows not using all possible restrictions (e.g. using
+		 * LL_FS_RO without LL_FS_RW).
+		 */
+ ret = 0;
+ goto out_free_name;
+ }
+
+ for (i = 0; i < num_paths; i++) {
+ struct stat statbuf;
+
+ path_beneath.parent_fd = open(path_list[i], O_PATH | O_CLOEXEC);
+ if (path_beneath.parent_fd < 0) {
+ fprintf(stderr, "Failed to open \"%s\": %s\n",
+ path_list[i], strerror(errno));
+ goto out_free_name;
+ }
+ if (fstat(path_beneath.parent_fd, &statbuf)) {
+ close(path_beneath.parent_fd);
+ goto out_free_name;
+ }
+ path_beneath.allowed_access = allowed_access;
+ if (!S_ISDIR(statbuf.st_mode))
+ path_beneath.allowed_access &= ACCESS_FILE;
+ if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0)) {
+ fprintf(stderr,
+ "Failed to update the ruleset with \"%s\": %s\n",
+ path_list[i], strerror(errno));
+ close(path_beneath.parent_fd);
+ goto out_free_name;
+ }
+ close(path_beneath.parent_fd);
+ }
+ ret = 0;
+
+out_free_name:
+ free(path_list);
+ free(env_path_name);
+ return ret;
+}
+
+/* clang-format off */
+
+#define ACCESS_FS_ROUGHLY_READ ( \
+ LANDLOCK_ACCESS_FS_EXECUTE | \
+ LANDLOCK_ACCESS_FS_READ_FILE | \
+ LANDLOCK_ACCESS_FS_READ_DIR)
+
+#define ACCESS_FS_ROUGHLY_WRITE ( \
+ LANDLOCK_ACCESS_FS_WRITE_FILE | \
+ LANDLOCK_ACCESS_FS_REMOVE_DIR | \
+ LANDLOCK_ACCESS_FS_REMOVE_FILE | \
+ LANDLOCK_ACCESS_FS_MAKE_CHAR | \
+ LANDLOCK_ACCESS_FS_MAKE_DIR | \
+ LANDLOCK_ACCESS_FS_MAKE_REG | \
+ LANDLOCK_ACCESS_FS_MAKE_SOCK | \
+ LANDLOCK_ACCESS_FS_MAKE_FIFO | \
+ LANDLOCK_ACCESS_FS_MAKE_BLOCK | \
+ LANDLOCK_ACCESS_FS_MAKE_SYM | \
+ LANDLOCK_ACCESS_FS_REFER)
+
+/* clang-format on */
+
+#define LANDLOCK_ABI_LAST 2
+
+int main(const int argc, char *const argv[], char *const *const envp)
+{
+ const char *cmd_path;
+ char *const *cmd_argv;
+ int ruleset_fd, abi;
+ __u64 access_fs_ro = ACCESS_FS_ROUGHLY_READ,
+ access_fs_rw = ACCESS_FS_ROUGHLY_READ | ACCESS_FS_ROUGHLY_WRITE;
+ struct landlock_ruleset_attr ruleset_attr = {
+ .handled_access_fs = access_fs_rw,
+ };
+
+ if (argc < 2) {
+ fprintf(stderr,
+ "usage: %s=\"...\" %s=\"...\" %s <cmd> [args]...\n\n",
+ ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
+ fprintf(stderr,
+ "Launch a command in a restricted environment.\n\n");
+ fprintf(stderr, "Environment variables containing paths, "
+ "each separated by a colon:\n");
+ fprintf(stderr,
+ "* %s: list of paths allowed to be used in a read-only way.\n",
+ ENV_FS_RO_NAME);
+ fprintf(stderr,
+ "* %s: list of paths allowed to be used in a read-write way.\n",
+ ENV_FS_RW_NAME);
+ fprintf(stderr,
+ "\nexample:\n"
+ "%s=\"/bin:/lib:/usr:/proc:/etc:/dev/urandom\" "
+ "%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" "
+ "%s bash -i\n\n",
+ ENV_FS_RO_NAME, ENV_FS_RW_NAME, argv[0]);
+ fprintf(stderr,
+ "This sandboxer can use Landlock features "
+ "up to ABI version %d.\n",
+ LANDLOCK_ABI_LAST);
+ return 1;
+ }
+
+ abi = landlock_create_ruleset(NULL, 0, LANDLOCK_CREATE_RULESET_VERSION);
+ if (abi < 0) {
+ const int err = errno;
+
+ perror("Failed to check Landlock compatibility");
+ switch (err) {
+ case ENOSYS:
+ fprintf(stderr,
+ "Hint: Landlock is not supported by the current kernel. "
+ "To support it, build the kernel with "
+ "CONFIG_SECURITY_LANDLOCK=y and prepend "
+ "\"landlock,\" to the content of CONFIG_LSM.\n");
+ break;
+ case EOPNOTSUPP:
+ fprintf(stderr,
+ "Hint: Landlock is currently disabled. "
+ "It can be enabled in the kernel configuration by "
+ "prepending \"landlock,\" to the content of CONFIG_LSM, "
+ "or at boot time by setting the same content to the "
+ "\"lsm\" kernel parameter.\n");
+ break;
+ }
+ return 1;
+ }
+
+ /* Best-effort security. */
+ switch (abi) {
+ case 1:
+ /* Removes LANDLOCK_ACCESS_FS_REFER for ABI < 2 */
+ ruleset_attr.handled_access_fs &= ~LANDLOCK_ACCESS_FS_REFER;
+
+ fprintf(stderr,
+ "Hint: You should update the running kernel "
+ "to leverage Landlock features "
+ "provided by ABI version %d (instead of %d).\n",
+ LANDLOCK_ABI_LAST, abi);
+ __attribute__((fallthrough));
+ case LANDLOCK_ABI_LAST:
+ break;
+ default:
+ fprintf(stderr,
+ "Hint: You should update this sandboxer "
+ "to leverage Landlock features "
+ "provided by ABI version %d (instead of %d).\n",
+ abi, LANDLOCK_ABI_LAST);
+ }
+ access_fs_ro &= ruleset_attr.handled_access_fs;
+ access_fs_rw &= ruleset_attr.handled_access_fs;
+
+ ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ if (ruleset_fd < 0) {
+ perror("Failed to create a ruleset");
+ return 1;
+ }
+ if (populate_ruleset(ENV_FS_RO_NAME, ruleset_fd, access_fs_ro)) {
+ goto err_close_ruleset;
+ }
+ if (populate_ruleset(ENV_FS_RW_NAME, ruleset_fd, access_fs_rw)) {
+ goto err_close_ruleset;
+ }
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ perror("Failed to restrict privileges");
+ goto err_close_ruleset;
+ }
+ if (landlock_restrict_self(ruleset_fd, 0)) {
+ perror("Failed to enforce ruleset");
+ goto err_close_ruleset;
+ }
+ close(ruleset_fd);
+
+ cmd_path = argv[1];
+ cmd_argv = argv + 1;
+ execvpe(cmd_path, cmd_argv, envp);
+ fprintf(stderr, "Failed to execute \"%s\": %s\n", cmd_path,
+ strerror(errno));
+ fprintf(stderr, "Hint: access to the binary, the interpreter or "
+ "shared libraries may be denied.\n");
+ return 1;
+
+err_close_ruleset:
+ close(ruleset_fd);
+ return 1;
+}
diff --git a/samples/livepatch/Makefile b/samples/livepatch/Makefile
new file mode 100644
index 000000000..9f853eeb6
--- /dev/null
+++ b/samples/livepatch/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c
new file mode 100644
index 000000000..378e2d402
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-busymod.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-callbacks-busymod.c - (un)patching callbacks demo support module
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Simple module to demonstrate livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-callbacks-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+static int sleep_secs;
+module_param(sleep_secs, int, 0644);
+MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+
+static void busymod_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work, busymod_work_func);
+
+static void busymod_work_func(struct work_struct *work)
+{
+ pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
+ msleep(sleep_secs * 1000);
+ pr_info("%s exit\n", __func__);
+}
+
+static int livepatch_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ schedule_delayed_work(&work,
+ msecs_to_jiffies(1000 * 0));
+ return 0;
+}
+
+static void livepatch_callbacks_mod_exit(void)
+{
+ cancel_delayed_work_sync(&work);
+ pr_info("%s\n", __func__);
+}
+
+module_init(livepatch_callbacks_mod_init);
+module_exit(livepatch_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
new file mode 100644
index 000000000..11c3f4357
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-callbacks-demo.c - (un)patching callbacks livepatch demo
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Demonstration of registering livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * Step 1 - load the simple module
+ *
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ *
+ *
+ * Step 2 - load the demonstration livepatch (with callbacks)
+ *
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ *
+ * Step 3 - cleanup
+ *
+ * echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ * rmmod livepatch_callbacks_demo
+ * rmmod livepatch_callbacks_mod
+ *
+ * Watch dmesg output to see livepatch enablement, callback execution
+ * and patching operations for both vmlinux and module targets.
+ *
+ * NOTE: swap the insmod order of livepatch-callbacks-mod.ko and
+ * livepatch-callbacks-demo.ko to observe what happens when a
+ * target module is loaded after a livepatch with callbacks.
+ *
+ * NOTE: 'pre_patch_ret' is a module parameter that sets the pre-patch
+ * callback return status. Try setting up a non-zero status
+ * such as -19 (-ENODEV):
+ *
+ * # Load demo livepatch, vmlinux is patched
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ * # Set up the next pre-patch callback to return -ENODEV
+ * echo -19 > /sys/module/livepatch_callbacks_demo/parameters/pre_patch_ret
+ *
+ * # Module loader refuses to load the target module
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ * insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-mod.ko: No such device
+ *
+ * NOTE: There is a second target module,
+ * livepatch-callbacks-busymod.ko, available for experimenting
+ * with livepatch (un)patch callbacks. This module contains
+ * a 'sleep_secs' parameter that parks the module on one of the
+ * functions that the livepatch demo module wants to patch.
+ * Modifying this value and tweaking the order of module loads can
+ * effectively demonstrate stalled patch transitions:
+ *
+ * # Load a target module, let it park on 'busymod_work_func' for
+ * # thirty seconds
+ * insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=30
+ *
+ * # Meanwhile load the livepatch
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ * # ... then load and unload another target module while the
+ * # transition is in progress
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ * rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ *
+ * # Finally cleanup
+ * echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ * rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int pre_patch_ret;
+module_param(pre_patch_ret, int, 0644);
+MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+	[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return pre_patch_ret;
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static void patched_work_func(struct work_struct *work)
+{
+ pr_info("%s\n", __func__);
+}
+
+static struct klp_func no_funcs[] = {
+ { }
+};
+
+static struct klp_func busymod_funcs[] = {
+ {
+ .old_name = "busymod_work_func",
+ .new_func = patched_work_func,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "livepatch_callbacks_mod",
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "livepatch_callbacks_busymod",
+ .funcs = busymod_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void livepatch_callbacks_demo_exit(void)
+{
+}
+
+module_init(livepatch_callbacks_demo_init);
+module_exit(livepatch_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
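
As the pre_patch_ret notes above show, a non-zero pre-patch return value
aborts either patch enablement or the loading of a late target module. A
hypothetical callback that gates patching on a readiness condition (the
flag below is illustrative, not part of this sample) boils down to:

	/* Sketch: veto the klp transition until the module is ready. */
	static bool target_ready; /* maintained elsewhere by the real module */

	static int gated_pre_patch(struct klp_object *obj)
	{
		if (!target_ready)
			return -EBUSY; /* enablement / module load fails */
		return 0; /* allow this object to be patched */
	}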
diff --git a/samples/livepatch/livepatch-callbacks-mod.c b/samples/livepatch/livepatch-callbacks-mod.c
new file mode 100644
index 000000000..2a074f422
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-mod.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-callbacks-mod.c - (un)patching callbacks demo support module
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Simple module to demonstrate livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-callbacks-demo.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+static int livepatch_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void livepatch_callbacks_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+}
+
+module_init(livepatch_callbacks_mod_init);
+module_exit(livepatch_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
new file mode 100644
index 000000000..cd76d7ebe
--- /dev/null
+++ b/samples/livepatch/livepatch-sample.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * livepatch-sample.c - Kernel Live Patching Sample Module
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+/*
+ * This (dumb) live patch overrides the function that prints the
+ * kernel boot cmdline when /proc/cmdline is read.
+ *
+ * Example:
+ *
+ * $ cat /proc/cmdline
+ * <your cmdline>
+ *
+ * $ insmod livepatch-sample.ko
+ * $ cat /proc/cmdline
+ * this has been live patched
+ *
+ * $ echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled
+ * $ cat /proc/cmdline
+ * <your cmdline>
+ */
+
+#include <linux/seq_file.h>
+static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s\n", "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "cmdline_proc_show",
+ .new_func = livepatch_cmdline_proc_show,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void livepatch_exit(void)
+{
+}
+
+module_init(livepatch_init);
+module_exit(livepatch_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
new file mode 100644
index 000000000..6701641bf
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-shadow-fix1.c - Shadow variables, livepatch demo
+ *
+ * Purpose
+ * -------
+ *
+ * Fixes the memory leak introduced in livepatch-shadow-mod through the
+ * use of a shadow variable. This fix demonstrates the "extending" of
+ * short-lived data structures by patching their allocation and release
+ * functions.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-shadow-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/* Shadow variable IDs */
+#define SV_LEAK 1
+
+/* Allocate new dummies every second */
+#define ALLOC_PERIOD 1
+/* Check for expired dummies after a few new ones have been allocated */
+#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
+/* Dummies expire after a few cleanup instances */
+#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+/*
+ * The constructor makes more sense together with klp_shadow_get_or_alloc().
+ * In this example, it would be safe to assign the pointer also to the shadow
+ * variable returned by klp_shadow_alloc(). But we wanted to show the more
+ * complicated use of the API.
+ */
+static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
+{
+ int **shadow_leak = shadow_data;
+ int **leak = ctor_data;
+
+ if (!ctor_data)
+ return -EINVAL;
+
+ *shadow_leak = *leak;
+ return 0;
+}
+
+static struct dummy *livepatch_fix1_dummy_alloc(void)
+{
+ struct dummy *d;
+ int *leak;
+ int **shadow_leak;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ d->jiffies_expire = jiffies +
+ msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+
+ /*
+ * Patch: save the extra memory location into a SV_LEAK shadow
+ * variable. A patched dummy_free routine can later fetch this
+ * pointer to handle resource release.
+ */
+ leak = kzalloc(sizeof(*leak), GFP_KERNEL);
+ if (!leak)
+ goto err_leak;
+
+ shadow_leak = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
+ shadow_leak_ctor, &leak);
+ if (!shadow_leak) {
+ pr_err("%s: failed to allocate shadow variable for the leaking pointer: dummy @ %p, leak @ %p\n",
+ __func__, d, leak);
+ goto err_shadow;
+ }
+
+ pr_info("%s: dummy @ %p, expires @ %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ return d;
+
+err_shadow:
+ kfree(leak);
+err_leak:
+ kfree(d);
+ return NULL;
+}
+
+static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data)
+{
+ void *d = obj;
+ int **shadow_leak = shadow_data;
+
+ pr_info("%s: dummy @ %p, prevented leak @ %p\n",
+ __func__, d, *shadow_leak);
+ kfree(*shadow_leak);
+}
+
+static void livepatch_fix1_dummy_free(struct dummy *d)
+{
+ int **shadow_leak;
+
+ /*
+ * Patch: fetch the saved SV_LEAK shadow variable, detach and
+ * free it. Note: handle cases where this shadow variable does
+ * not exist (ie, dummy structures allocated before this livepatch
+ * was loaded.)
+ */
+ shadow_leak = klp_shadow_get(d, SV_LEAK);
+ if (shadow_leak)
+ klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor);
+ else
+ pr_info("%s: dummy @ %p leaked!\n", __func__, d);
+
+ kfree(d);
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "dummy_alloc",
+ .new_func = livepatch_fix1_dummy_alloc,
+ },
+ {
+ .old_name = "dummy_free",
+ .new_func = livepatch_fix1_dummy_free,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = "livepatch_shadow_mod",
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_shadow_fix1_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void livepatch_shadow_fix1_exit(void)
+{
+ /* Cleanup any existing SV_LEAK shadow variables */
+ klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor);
+}
+
+module_init(livepatch_shadow_fix1_init);
+module_exit(livepatch_shadow_fix1_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
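
Stripped of the demo scaffolding, the shadow variable lifecycle used by this
fix is three calls. A condensed sketch, reusing the identifiers from the
code above:

	/* Attach: pair <d, SV_LEAK> with pointer-sized data, filled by the ctor. */
	int **sv = klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
				    shadow_leak_ctor, &leak);

	/* Look up: NULL for dummies allocated before the livepatch loaded. */
	int **cur = klp_shadow_get(d, SV_LEAK);

	/* Detach and destroy; the dtor frees the saved pointer. */
	klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor);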
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
new file mode 100644
index 000000000..361046a4f
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-shadow-fix2.c - Shadow variables, livepatch demo
+ *
+ * Purpose
+ * -------
+ *
+ * Adds functionality to livepatch-shadow-mod's in-flight data
+ * structures through a shadow variable. The livepatch patches a
+ * routine that periodically inspects data structures, incrementing a
+ * per-data-structure counter, creating the counter if needed.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-shadow-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/* Shadow variable IDs */
+#define SV_LEAK 1
+#define SV_COUNTER 2
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+static bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
+{
+ int *shadow_count;
+
+ /*
+ * Patch: handle in-flight dummy structures, if they do not
+ * already have a SV_COUNTER shadow variable, then attach a
+ * new one.
+ */
+ shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER,
+ sizeof(*shadow_count), GFP_NOWAIT,
+ NULL, NULL);
+ if (shadow_count)
+ *shadow_count += 1;
+
+ return time_after(jiffies, d->jiffies_expire);
+}
+
+static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data)
+{
+ void *d = obj;
+ int **shadow_leak = shadow_data;
+
+ pr_info("%s: dummy @ %p, prevented leak @ %p\n",
+ __func__, d, *shadow_leak);
+ kfree(*shadow_leak);
+}
+
+static void livepatch_fix2_dummy_free(struct dummy *d)
+{
+ int **shadow_leak;
+ int *shadow_count;
+
+ /* Patch: copy the memory leak patch from the fix1 module. */
+ shadow_leak = klp_shadow_get(d, SV_LEAK);
+ if (shadow_leak)
+ klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor);
+ else
+ pr_info("%s: dummy @ %p leaked!\n", __func__, d);
+
+ /*
+ * Patch: fetch the SV_COUNTER shadow variable and display
+ * the final count. Detach the shadow variable.
+ */
+ shadow_count = klp_shadow_get(d, SV_COUNTER);
+ if (shadow_count) {
+ pr_info("%s: dummy @ %p, check counter = %d\n",
+ __func__, d, *shadow_count);
+ klp_shadow_free(d, SV_COUNTER, NULL);
+ }
+
+ kfree(d);
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "dummy_check",
+ .new_func = livepatch_fix2_dummy_check,
+ },
+ {
+ .old_name = "dummy_free",
+ .new_func = livepatch_fix2_dummy_free,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = "livepatch_shadow_mod",
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_shadow_fix2_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void livepatch_shadow_fix2_exit(void)
+{
+ /* Cleanup any existing SV_COUNTER shadow variables */
+ klp_shadow_free_all(SV_COUNTER, NULL);
+}
+
+module_init(livepatch_shadow_fix2_init);
+module_exit(livepatch_shadow_fix2_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
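
What distinguishes fix2 from fix1 is klp_shadow_get_or_alloc(), which lets
patched code attach state lazily to objects that were already in flight when
the livepatch loaded. In sketch form, with the identifiers used above:

	/*
	 * Returns the existing <d, SV_COUNTER> data, or atomically allocates
	 * and attaches zeroed data on first use; GFP_NOWAIT mirrors the
	 * allocation flags chosen by the fix above.
	 */
	int *count = klp_shadow_get_or_alloc(d, SV_COUNTER, sizeof(*count),
					     GFP_NOWAIT, NULL, NULL);
	if (count)
		(*count)++;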
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
new file mode 100644
index 000000000..7e753b0d2
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-mod.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ */
+
+/*
+ * livepatch-shadow-mod.c - Shadow variables, buggy module demo
+ *
+ * Purpose
+ * -------
+ *
+ * As a demonstration of livepatch shadow variable API, this module
+ * introduces memory leak behavior that livepatch modules
+ * livepatch-shadow-fix1.ko and livepatch-shadow-fix2.ko correct and
+ * enhance.
+ *
+ * WARNING - even though the livepatch-shadow-fix modules patch the
+ * memory leak, please load these modules at your own risk -- some
+ * amount of memory may be leaked before the bug is patched.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * Step 1 - Load the buggy demonstration module:
+ *
+ * insmod samples/livepatch/livepatch-shadow-mod.ko
+ *
+ * Watch dmesg output for a few moments to see new dummies being allocated
+ * and a periodic cleanup check. (Note: a small amount of memory is
+ * being leaked.)
+ *
+ *
+ * Step 2 - Load livepatch fix1:
+ *
+ * insmod samples/livepatch/livepatch-shadow-fix1.ko
+ *
+ * Continue watching dmesg and note that now livepatch_fix1_dummy_free()
+ * and livepatch_fix1_dummy_alloc() are logging messages about leaked
+ * memory and eventually leaks prevented.
+ *
+ *
+ * Step 3 - Load livepatch fix2 (on top of fix1):
+ *
+ * insmod samples/livepatch/livepatch-shadow-fix2.ko
+ *
+ * This module extends functionality through shadow variables, as a new
+ * "check" counter is added to the dummy structure. Periodic dmesg
+ * messages will log these as dummies are cleaned up.
+ *
+ *
+ * Step 4 - Cleanup
+ *
+ * Unwind the demonstration by disabling the livepatch fix modules, then
+ * removing them and the demo module:
+ *
+ * echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix2/enabled
+ * echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix1/enabled
+ * rmmod livepatch-shadow-fix2
+ * rmmod livepatch-shadow-fix1
+ * rmmod livepatch-shadow-mod
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/workqueue.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Buggy module for shadow variable demo");
+
+/* Allocate new dummies every second */
+#define ALLOC_PERIOD 1
+/* Check for expired dummies after a few new ones have been allocated */
+#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
+/* Dummies expire after a few cleanup instances */
+#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
+
+/*
+ * Keep a list of all the dummies so we can clean up any residual ones
+ * on module exit
+ */
+static LIST_HEAD(dummy_list);
+static DEFINE_MUTEX(dummy_list_mutex);
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+static __used noinline struct dummy *dummy_alloc(void)
+{
+ struct dummy *d;
+ int *leak;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ d->jiffies_expire = jiffies +
+ msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+
+ /* Oops, forgot to save leak! */
+ leak = kzalloc(sizeof(*leak), GFP_KERNEL);
+ if (!leak) {
+ kfree(d);
+ return NULL;
+ }
+
+ pr_info("%s: dummy @ %p, expires @ %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ return d;
+}
+
+static __used noinline void dummy_free(struct dummy *d)
+{
+ pr_info("%s: dummy @ %p, expired = %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ kfree(d);
+}
+
+static __used noinline bool dummy_check(struct dummy *d,
+ unsigned long jiffies)
+{
+ return time_after(jiffies, d->jiffies_expire);
+}
+
+/*
+ * alloc_work_func: allocates new dummy structures, allocates additional
+ *                  memory, aptly named "leak", but doesn't keep a
+ *                  permanent record of it.
+ */
+
+static void alloc_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
+
+static void alloc_work_func(struct work_struct *work)
+{
+ struct dummy *d;
+
+ d = dummy_alloc();
+ if (!d)
+ return;
+
+ mutex_lock(&dummy_list_mutex);
+ list_add(&d->list, &dummy_list);
+ mutex_unlock(&dummy_list_mutex);
+
+ schedule_delayed_work(&alloc_dwork,
+ msecs_to_jiffies(1000 * ALLOC_PERIOD));
+}
+
+/*
+ * cleanup_work_func: frees dummy structures. Without knowledge of
+ * "leak", it leaks the additional memory that
+ * alloc_work_func created.
+ */
+
+static void cleanup_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
+
+static void cleanup_work_func(struct work_struct *work)
+{
+ struct dummy *d, *tmp;
+ unsigned long j;
+
+ j = jiffies;
+ pr_info("%s: jiffies = %lx\n", __func__, j);
+
+ mutex_lock(&dummy_list_mutex);
+ list_for_each_entry_safe(d, tmp, &dummy_list, list) {
+
+ /* Kick out and free any expired dummies */
+ if (dummy_check(d, j)) {
+ list_del(&d->list);
+ dummy_free(d);
+ }
+ }
+ mutex_unlock(&dummy_list_mutex);
+
+ schedule_delayed_work(&cleanup_dwork,
+ msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+}
+
+static int livepatch_shadow_mod_init(void)
+{
+ schedule_delayed_work(&alloc_dwork,
+ msecs_to_jiffies(1000 * ALLOC_PERIOD));
+ schedule_delayed_work(&cleanup_dwork,
+ msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+
+ return 0;
+}
+
+static void livepatch_shadow_mod_exit(void)
+{
+ struct dummy *d, *tmp;
+
+	/* Cancel and wait for any in-flight dummy work */
+ cancel_delayed_work_sync(&alloc_dwork);
+ cancel_delayed_work_sync(&cleanup_dwork);
+
+ /* Cleanup residual dummies */
+ list_for_each_entry_safe(d, tmp, &dummy_list, list) {
+ list_del(&d->list);
+ dummy_free(d);
+ }
+}
+
+module_init(livepatch_shadow_mod_init);
+module_exit(livepatch_shadow_mod_exit);
diff --git a/samples/mei/.gitignore b/samples/mei/.gitignore
new file mode 100644
index 000000000..fe894bcb6
--- /dev/null
+++ b/samples/mei/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/mei-amt-version
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
new file mode 100644
index 000000000..c54b8a0ab
--- /dev/null
+++ b/samples/mei/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+userprogs-always-y += mei-amt-version
+
+userccflags += -I usr/include
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
new file mode 100644
index 000000000..867debd3b
--- /dev/null
+++ b/samples/mei/mei-amt-version.c
@@ -0,0 +1,488 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bits/wordsize.h>
+#include <linux/mei.h>
+
+/*****************************************************************************
+ * Intel Management Engine Interface
+ *****************************************************************************/
+
+#define mei_msg(_me, fmt, ARGS...) do { \
+ if (_me->verbose) \
+ fprintf(stderr, fmt, ##ARGS); \
+} while (0)
+
+#define mei_err(_me, fmt, ARGS...) do { \
+ fprintf(stderr, "Error: " fmt, ##ARGS); \
+} while (0)
+
+struct mei {
+ uuid_le guid;
+ bool initialized;
+ bool verbose;
+ unsigned int buf_size;
+ unsigned char prot_ver;
+ int fd;
+};
+
+static void mei_deinit(struct mei *cl)
+{
+ if (cl->fd != -1)
+ close(cl->fd);
+ cl->fd = -1;
+ cl->buf_size = 0;
+ cl->prot_ver = 0;
+ cl->initialized = false;
+}
+
+static bool mei_init(struct mei *me, const uuid_le *guid,
+ unsigned char req_protocol_version, bool verbose)
+{
+ int result;
+ struct mei_client *cl;
+ struct mei_connect_client_data data;
+
+ me->verbose = verbose;
+
+ me->fd = open("/dev/mei0", O_RDWR);
+ if (me->fd == -1) {
+ mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
+ goto err;
+ }
+ memcpy(&me->guid, guid, sizeof(*guid));
+ memset(&data, 0, sizeof(data));
+ me->initialized = true;
+
+ memcpy(&data.in_client_uuid, &me->guid, sizeof(me->guid));
+ result = ioctl(me->fd, IOCTL_MEI_CONNECT_CLIENT, &data);
+ if (result) {
+		mei_err(me, "IOCTL_MEI_CONNECT_CLIENT failed, err=%d\n", result);
+ goto err;
+ }
+ cl = &data.out_client_properties;
+ mei_msg(me, "max_message_length %d\n", cl->max_msg_length);
+ mei_msg(me, "protocol_version %d\n", cl->protocol_version);
+
+ if ((req_protocol_version > 0) &&
+ (cl->protocol_version != req_protocol_version)) {
+ mei_err(me, "Intel MEI protocol version not supported\n");
+ goto err;
+ }
+
+ me->buf_size = cl->max_msg_length;
+ me->prot_ver = cl->protocol_version;
+
+ return true;
+err:
+ mei_deinit(me);
+ return false;
+}
+
+static ssize_t mei_recv_msg(struct mei *me, unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ struct timeval tv;
+ fd_set set;
+ ssize_t rc;
+
+ tv.tv_sec = timeout / 1000;
+	tv.tv_usec = (timeout % 1000) * 1000; /* remainder in ms -> microseconds */
+
+ mei_msg(me, "call read length = %zd\n", len);
+
+ FD_ZERO(&set);
+ FD_SET(me->fd, &set);
+ rc = select(me->fd + 1, &set, NULL, NULL, &tv);
+ if (rc > 0 && FD_ISSET(me->fd, &set)) {
+ mei_msg(me, "have reply\n");
+ } else if (rc == 0) {
+ rc = -1;
+ mei_err(me, "read failed on timeout\n");
+ goto out;
+ } else { /* rc < 0 */
+		rc = -errno; /* negative, so callers and the cleanup below see failure */
+ mei_err(me, "read failed on select with status %zd %s\n",
+ rc, strerror(errno));
+ goto out;
+ }
+
+ rc = read(me->fd, buffer, len);
+ if (rc < 0) {
+ mei_err(me, "read failed with status %zd %s\n",
+ rc, strerror(errno));
+ goto out;
+ }
+
+ mei_msg(me, "read succeeded with result %zd\n", rc);
+
+out:
+ if (rc < 0)
+ mei_deinit(me);
+
+ return rc;
+}
+
+static ssize_t mei_send_msg(struct mei *me, const unsigned char *buffer,
+ ssize_t len, unsigned long timeout)
+{
+ ssize_t written;
+ ssize_t rc;
+
+ mei_msg(me, "call write length = %zd\n", len);
+
+ written = write(me->fd, buffer, len);
+ if (written < 0) {
+ rc = -errno;
+ mei_err(me, "write failed with status %zd %s\n",
+ written, strerror(errno));
+ goto out;
+ }
+ mei_msg(me, "write success\n");
+
+ rc = written;
+out:
+ if (rc < 0)
+ mei_deinit(me);
+
+ return rc;
+}
+
+/***************************************************************************
+ * Intel Advanced Management Technology ME Client
+ ***************************************************************************/
+
+#define AMT_MAJOR_VERSION 1
+#define AMT_MINOR_VERSION 1
+
+#define AMT_STATUS_SUCCESS 0x0
+#define AMT_STATUS_INTERNAL_ERROR 0x1
+#define AMT_STATUS_NOT_READY 0x2
+#define AMT_STATUS_INVALID_AMT_MODE 0x3
+#define AMT_STATUS_INVALID_MESSAGE_LENGTH 0x4
+
+#define AMT_STATUS_HOST_IF_EMPTY_RESPONSE 0x4000
+#define AMT_STATUS_SDK_RESOURCES 0x1004
+
+
+#define AMT_BIOS_VERSION_LEN 65
+#define AMT_VERSIONS_NUMBER 50
+#define AMT_UNICODE_STRING_LEN 20
+
+struct amt_unicode_string {
+ uint16_t length;
+ char string[AMT_UNICODE_STRING_LEN];
+} __attribute__((packed));
+
+struct amt_version_type {
+ struct amt_unicode_string description;
+ struct amt_unicode_string version;
+} __attribute__((packed));
+
+struct amt_version {
+ uint8_t major;
+ uint8_t minor;
+} __attribute__((packed));
+
+struct amt_code_versions {
+ uint8_t bios[AMT_BIOS_VERSION_LEN];
+ uint32_t count;
+ struct amt_version_type versions[AMT_VERSIONS_NUMBER];
+} __attribute__((packed));
+
+/***************************************************************************
+ * Intel Advanced Management Technology Host Interface
+ ***************************************************************************/
+
+struct amt_host_if_msg_header {
+ struct amt_version version;
+ uint16_t _reserved;
+ uint32_t command;
+ uint32_t length;
+} __attribute__((packed));
+
+struct amt_host_if_resp_header {
+ struct amt_host_if_msg_header header;
+ uint32_t status;
+ unsigned char data[];
+} __attribute__((packed));
+
+const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \
+ 0xac, 0xa8, 0x46, 0xe0, 0xff, 0x65, 0x81, 0x4c);
+
+#define AMT_HOST_IF_CODE_VERSIONS_REQUEST 0x0400001A
+#define AMT_HOST_IF_CODE_VERSIONS_RESPONSE 0x0480001A
+
+const struct amt_host_if_msg_header CODE_VERSION_REQ = {
+ .version = {AMT_MAJOR_VERSION, AMT_MINOR_VERSION},
+ ._reserved = 0,
+ .command = AMT_HOST_IF_CODE_VERSIONS_REQUEST,
+ .length = 0
+};
+
+
+struct amt_host_if {
+ struct mei mei_cl;
+ unsigned long send_timeout;
+ bool initialized;
+};
+
+
+static bool amt_host_if_init(struct amt_host_if *acmd,
+ unsigned long send_timeout, bool verbose)
+{
+ acmd->send_timeout = (send_timeout) ? send_timeout : 20000;
+ acmd->initialized = mei_init(&acmd->mei_cl, &MEI_IAMTHIF, 0, verbose);
+ return acmd->initialized;
+}
+
+static void amt_host_if_deinit(struct amt_host_if *acmd)
+{
+ mei_deinit(&acmd->mei_cl);
+ acmd->initialized = false;
+}
+
+static uint32_t amt_verify_code_versions(const struct amt_host_if_resp_header *resp)
+{
+ uint32_t status = AMT_STATUS_SUCCESS;
+ struct amt_code_versions *code_ver;
+ size_t code_ver_len;
+ uint32_t ver_type_cnt;
+ uint32_t len;
+ uint32_t i;
+
+ code_ver = (struct amt_code_versions *)resp->data;
+ /* length - sizeof(status) */
+ code_ver_len = resp->header.length - sizeof(uint32_t);
+ ver_type_cnt = code_ver_len -
+ sizeof(code_ver->bios) -
+ sizeof(code_ver->count);
+ if (code_ver->count != ver_type_cnt / sizeof(struct amt_version_type)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ for (i = 0; i < code_ver->count; i++) {
+ len = code_ver->versions[i].description.length;
+
+ if (len > AMT_UNICODE_STRING_LEN) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+
+ len = code_ver->versions[i].version.length;
+ if (code_ver->versions[i].version.string[len] != '\0' ||
+ len != strlen(code_ver->versions[i].version.string)) {
+ status = AMT_STATUS_INTERNAL_ERROR;
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+static uint32_t amt_verify_response_header(uint32_t command,
+ const struct amt_host_if_msg_header *resp_hdr,
+ uint32_t response_size)
+{
+ if (response_size < sizeof(struct amt_host_if_resp_header)) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (response_size != (resp_hdr->length +
+ sizeof(struct amt_host_if_msg_header))) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->command != command) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->_reserved != 0) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ } else if (resp_hdr->version.major != AMT_MAJOR_VERSION ||
+ resp_hdr->version.minor < AMT_MINOR_VERSION) {
+ return AMT_STATUS_INTERNAL_ERROR;
+ }
+ return AMT_STATUS_SUCCESS;
+}
+
+static uint32_t amt_host_if_call(struct amt_host_if *acmd,
+ const unsigned char *command, ssize_t command_sz,
+ uint8_t **read_buf, uint32_t rcmd,
+ unsigned int expected_sz)
+{
+ uint32_t in_buf_sz;
+ ssize_t out_buf_sz;
+ ssize_t written;
+ uint32_t status;
+ struct amt_host_if_resp_header *msg_hdr;
+
+ in_buf_sz = acmd->mei_cl.buf_size;
+ *read_buf = (uint8_t *)malloc(sizeof(uint8_t) * in_buf_sz);
+ if (*read_buf == NULL)
+ return AMT_STATUS_SDK_RESOURCES;
+ memset(*read_buf, 0, in_buf_sz);
+ msg_hdr = (struct amt_host_if_resp_header *)*read_buf;
+
+ written = mei_send_msg(&acmd->mei_cl,
+ command, command_sz, acmd->send_timeout);
+ if (written != command_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ out_buf_sz = mei_recv_msg(&acmd->mei_cl, *read_buf, in_buf_sz, 2000);
+ if (out_buf_sz <= 0)
+ return AMT_STATUS_HOST_IF_EMPTY_RESPONSE;
+
+ status = msg_hdr->status;
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ status = amt_verify_response_header(rcmd,
+ &msg_hdr->header, out_buf_sz);
+ if (status != AMT_STATUS_SUCCESS)
+ return status;
+
+ if (expected_sz && expected_sz != out_buf_sz)
+ return AMT_STATUS_INTERNAL_ERROR;
+
+ return AMT_STATUS_SUCCESS;
+}
+
+
+static uint32_t amt_get_code_versions(struct amt_host_if *cmd,
+ struct amt_code_versions *versions)
+{
+ struct amt_host_if_resp_header *response = NULL;
+ uint32_t status;
+
+ status = amt_host_if_call(cmd,
+ (const unsigned char *)&CODE_VERSION_REQ,
+ sizeof(CODE_VERSION_REQ),
+ (uint8_t **)&response,
+ AMT_HOST_IF_CODE_VERSIONS_RESPONSE, 0);
+
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ status = amt_verify_code_versions(response);
+ if (status != AMT_STATUS_SUCCESS)
+ goto out;
+
+ memcpy(versions, response->data, sizeof(struct amt_code_versions));
+out:
+ if (response != NULL)
+ free(response);
+
+ return status;
+}
+
+/************************** end of amt_host_if_command ***********************/
+int main(int argc, char **argv)
+{
+ struct amt_code_versions ver;
+ struct amt_host_if acmd;
+ unsigned int i;
+ uint32_t status;
+ int ret;
+ bool verbose;
+
+ verbose = (argc > 1 && strcmp(argv[1], "-v") == 0);
+
+ if (!amt_host_if_init(&acmd, 5000, verbose)) {
+ ret = 1;
+ goto out;
+ }
+
+ status = amt_get_code_versions(&acmd, &ver);
+
+ amt_host_if_deinit(&acmd);
+
+ switch (status) {
+ case AMT_STATUS_HOST_IF_EMPTY_RESPONSE:
+ printf("Intel AMT: DISABLED\n");
+ ret = 0;
+ break;
+ case AMT_STATUS_SUCCESS:
+ printf("Intel AMT: ENABLED\n");
+ for (i = 0; i < ver.count; i++) {
+ printf("%s:\t%s\n", ver.versions[i].description.string,
+ ver.versions[i].version.string);
+ }
+ ret = 0;
+ break;
+ default:
+ printf("An error has occurred\n");
+ ret = 1;
+ break;
+ }
+
+out:
+ return ret;
+}
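
One detail worth flagging in mei_recv_msg(): select() takes the sub-second
part of its timeout in microseconds, and tv_usec must stay below 1000000,
so a millisecond timeout has to be split into whole seconds plus the
remainder scaled by 1000. A tiny helper makes the conversion explicit
(illustrative only, not part of the sample):

	#include <sys/time.h>

	/* Split a millisecond timeout into the timeval select() expects. */
	static inline struct timeval ms_to_timeval(unsigned long ms)
	{
		struct timeval tv;

		tv.tv_sec = ms / 1000;
		tv.tv_usec = (ms % 1000) * 1000; /* microseconds */
		return tv;
	}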
diff --git a/samples/nitro_enclaves/.gitignore b/samples/nitro_enclaves/.gitignore
new file mode 100644
index 000000000..6a718eec7
--- /dev/null
+++ b/samples/nitro_enclaves/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+/ne_ioctl_sample
diff --git a/samples/nitro_enclaves/Makefile b/samples/nitro_enclaves/Makefile
new file mode 100644
index 000000000..a3ec78fef
--- /dev/null
+++ b/samples/nitro_enclaves/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+# Enclave lifetime management support for Nitro Enclaves (NE) - ioctl sample
+# usage.
+
+.PHONY: all clean
+
+CFLAGS += -Wall
+
+all:
+ $(CC) $(CFLAGS) -o ne_ioctl_sample ne_ioctl_sample.c -lpthread
+
+clean:
+ rm -f ne_ioctl_sample
diff --git a/samples/nitro_enclaves/ne_ioctl_sample.c b/samples/nitro_enclaves/ne_ioctl_sample.c
new file mode 100644
index 000000000..765b131c7
--- /dev/null
+++ b/samples/nitro_enclaves/ne_ioctl_sample.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+/**
+ * DOC: Sample flow of using the ioctl interface provided by the Nitro Enclaves (NE)
+ * kernel driver.
+ *
+ * Usage
+ * -----
+ *
+ * Load the nitro_enclaves module, also setting the enclave CPU pool. The
+ * enclave CPUs need to be full cores from the same NUMA node. CPU 0 and its
+ * siblings have to remain available for the primary / parent VM, so they
+ * cannot be included in the enclave CPU pool.
+ *
+ * See the cpu list section from the kernel documentation.
+ * https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists
+ *
+ * insmod drivers/virt/nitro_enclaves/nitro_enclaves.ko
+ * lsmod
+ *
+ * The CPU pool can be set at runtime, after the kernel module is loaded.
+ *
+ * echo <cpu-list> > /sys/module/nitro_enclaves/parameters/ne_cpus
+ *
+ * NUMA and CPU siblings information can be found using:
+ *
+ * lscpu
+ * /proc/cpuinfo
+ *
+ * Check the online / offline CPU list. The CPUs from the pool should be
+ * offlined.
+ *
+ * lscpu
+ *
+ * Check dmesg for any warnings / errors through the NE driver lifetime / usage.
+ * The NE logs contain the "nitro_enclaves" or "pci 0000:00:02.0" pattern.
+ *
+ * dmesg
+ *
+ * Set up hugetlbfs huge pages. The memory needs to be from the same NUMA node as
+ * the enclave CPUs.
+ *
+ * https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html
+ *
+ * By default, hugetlb page allocations are distributed across all possible
+ * NUMA nodes. Use the following configuration files to set the number of huge
+ * pages on a given NUMA node:
+ *
+ * /sys/devices/system/node/node<X>/hugepages/hugepages-2048kB/nr_hugepages
+ * /sys/devices/system/node/node<X>/hugepages/hugepages-1048576kB/nr_hugepages
+ *
+ * or, on a system without multiple NUMA nodes, the number of
+ * 2 MiB / 1 GiB huge pages can also be set using
+ *
+ * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+ * /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
+ *
+ * This example uses 256 huge pages of 2 MiB each.
+ *
+ * Build and run the NE sample.
+ *
+ * make -C samples/nitro_enclaves clean
+ * make -C samples/nitro_enclaves
+ * ./samples/nitro_enclaves/ne_ioctl_sample <path_to_enclave_image>
+ *
+ * Unload the nitro_enclaves module.
+ *
+ * rmmod nitro_enclaves
+ * lsmod
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <poll.h>
+#include <pthread.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/mman.h>
+#include <linux/nitro_enclaves.h>
+#include <linux/vm_sockets.h>
+
+/**
+ * NE_DEV_NAME - Nitro Enclaves (NE) misc device that provides the ioctl interface.
+ */
+#define NE_DEV_NAME "/dev/nitro_enclaves"
+
+/**
+ * NE_POLL_WAIT_TIME - Timeout in seconds for each poll event.
+ */
+#define NE_POLL_WAIT_TIME (60)
+/**
+ * NE_POLL_WAIT_TIME_MS - Timeout in milliseconds for each poll event.
+ */
+#define NE_POLL_WAIT_TIME_MS (NE_POLL_WAIT_TIME * 1000)
+
+/**
+ * NE_SLEEP_TIME - Amount of time in seconds for the process to keep the enclave alive.
+ */
+#define NE_SLEEP_TIME (300)
+
+/**
+ * NE_DEFAULT_NR_VCPUS - Default number of vCPUs set for an enclave.
+ */
+#define NE_DEFAULT_NR_VCPUS (2)
+
+/**
+ * NE_MIN_MEM_REGION_SIZE - Minimum size of a memory region - 2 MiB.
+ */
+#define NE_MIN_MEM_REGION_SIZE (2 * 1024 * 1024)
+
+/**
+ * NE_DEFAULT_NR_MEM_REGIONS - Default number of memory regions of 2 MiB set for
+ * an enclave.
+ */
+#define NE_DEFAULT_NR_MEM_REGIONS (256)
+
+/**
+ * NE_IMAGE_LOAD_HEARTBEAT_CID - Vsock CID for enclave image loading heartbeat logic.
+ */
+#define NE_IMAGE_LOAD_HEARTBEAT_CID (3)
+/**
+ * NE_IMAGE_LOAD_HEARTBEAT_PORT - Vsock port for enclave image loading heartbeat logic.
+ */
+#define NE_IMAGE_LOAD_HEARTBEAT_PORT (9000)
+/**
+ * NE_IMAGE_LOAD_HEARTBEAT_VALUE - Heartbeat value for enclave image loading.
+ */
+#define NE_IMAGE_LOAD_HEARTBEAT_VALUE (0xb7)
+
+/**
+ * struct ne_user_mem_region - User space memory region set for an enclave.
+ * @userspace_addr: Address of the user space memory region.
+ * @memory_size: Size of the user space memory region.
+ */
+struct ne_user_mem_region {
+ void *userspace_addr;
+ size_t memory_size;
+};
+
+/**
+ * ne_create_vm() - Create a slot for the enclave VM.
+ * @ne_dev_fd: The file descriptor of the NE misc device.
+ * @slot_uid: The generated slot uid for the enclave.
+ * @enclave_fd : The generated file descriptor for the enclave.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_create_vm(int ne_dev_fd, unsigned long *slot_uid, int *enclave_fd)
+{
+ int rc = -EINVAL;
+
+	*enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, slot_uid);
+
+ if (*enclave_fd < 0) {
+ rc = *enclave_fd;
+ switch (errno) {
+ case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
+ printf("Error in create VM, no CPUs available in the NE CPU pool\n");
+
+ break;
+ }
+
+ default:
+ printf("Error in create VM [%m]\n");
+ }
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_poll_enclave_fd() - Thread function for polling the enclave fd.
+ * @data: Argument provided for the polling function.
+ *
+ * Context: Process context.
+ * Return:
+ * * NULL on success / failure.
+ */
+void *ne_poll_enclave_fd(void *data)
+{
+ int enclave_fd = *(int *)data;
+ struct pollfd fds[1] = {};
+ int i = 0;
+ int rc = -EINVAL;
+
+ printf("Running from poll thread, enclave fd %d\n", enclave_fd);
+
+ fds[0].fd = enclave_fd;
+ fds[0].events = POLLIN | POLLERR | POLLHUP;
+
+ /* Keep on polling until the current process is terminated. */
+ while (1) {
+ printf("[iter %d] Polling ...\n", i);
+
+ rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
+ if (rc < 0) {
+ printf("Error in poll [%m]\n");
+
+ return NULL;
+ }
+
+ i++;
+
+ if (!rc) {
+ printf("Poll: %d seconds elapsed\n",
+ i * NE_POLL_WAIT_TIME);
+
+ continue;
+ }
+
+ printf("Poll received value 0x%x\n", fds[0].revents);
+
+ if (fds[0].revents & POLLHUP) {
+ printf("Received POLLHUP\n");
+
+ return NULL;
+ }
+
+ if (fds[0].revents & POLLNVAL) {
+ printf("Received POLLNVAL\n");
+
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ne_alloc_user_mem_region() - Allocate a user space memory region for an enclave.
+ * @ne_user_mem_region: User space memory region allocated using hugetlbfs.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_alloc_user_mem_region(struct ne_user_mem_region *ne_user_mem_region)
+{
+ /**
+ * Check available hugetlb encodings for different huge page sizes in
+ * include/uapi/linux/mman.h.
+ */
+ ne_user_mem_region->userspace_addr = mmap(NULL, ne_user_mem_region->memory_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS |
+ MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
+ if (ne_user_mem_region->userspace_addr == MAP_FAILED) {
+ printf("Error in mmap memory [%m]\n");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_load_enclave_image() - Place the enclave image in the enclave memory.
+ * @enclave_fd : The file descriptor associated with the enclave.
+ * @ne_user_mem_regions: User space memory regions allocated for the enclave.
+ * @enclave_image_path : The file path of the enclave image.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_load_enclave_image(int enclave_fd, struct ne_user_mem_region ne_user_mem_regions[],
+ char *enclave_image_path)
+{
+ unsigned char *enclave_image = NULL;
+ int enclave_image_fd = -1;
+ size_t enclave_image_size = 0;
+ size_t enclave_memory_size = 0;
+ unsigned long i = 0;
+ size_t image_written_bytes = 0;
+ struct ne_image_load_info image_load_info = {
+ .flags = NE_EIF_IMAGE,
+ };
+ struct stat image_stat_buf = {};
+ int rc = -EINVAL;
+ size_t temp_image_offset = 0;
+
+ for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
+ enclave_memory_size += ne_user_mem_regions[i].memory_size;
+
+ rc = stat(enclave_image_path, &image_stat_buf);
+ if (rc < 0) {
+ printf("Error in get image stat info [%m]\n");
+
+ return rc;
+ }
+
+ enclave_image_size = image_stat_buf.st_size;
+
+ if (enclave_memory_size < enclave_image_size) {
+ printf("The enclave memory is smaller than the enclave image size\n");
+
+ return -ENOMEM;
+ }
+
+ rc = ioctl(enclave_fd, NE_GET_IMAGE_LOAD_INFO, &image_load_info);
+ if (rc < 0) {
+ switch (errno) {
+ case NE_ERR_NOT_IN_INIT_STATE: {
+ printf("Error in get image load info, enclave not in init state\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_FLAG_VALUE: {
+ printf("Error in get image load info, provided invalid flag\n");
+
+ break;
+ }
+
+ default:
+ printf("Error in get image load info [%m]\n");
+ }
+
+ return rc;
+ }
+
+ printf("Enclave image offset in enclave memory is %lld\n",
+ image_load_info.memory_offset);
+
+ enclave_image_fd = open(enclave_image_path, O_RDONLY);
+ if (enclave_image_fd < 0) {
+ printf("Error in open enclave image file [%m]\n");
+
+ return enclave_image_fd;
+ }
+
+ enclave_image = mmap(NULL, enclave_image_size, PROT_READ,
+ MAP_PRIVATE, enclave_image_fd, 0);
+ if (enclave_image == MAP_FAILED) {
+ printf("Error in mmap enclave image [%m]\n");
+
+ return -1;
+ }
+
+ temp_image_offset = image_load_info.memory_offset;
+
+ for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
+ size_t bytes_to_write = 0;
+ size_t memory_offset = 0;
+ size_t memory_size = ne_user_mem_regions[i].memory_size;
+ size_t remaining_bytes = 0;
+ void *userspace_addr = ne_user_mem_regions[i].userspace_addr;
+
+ if (temp_image_offset >= memory_size) {
+ temp_image_offset -= memory_size;
+
+ continue;
+ } else if (temp_image_offset != 0) {
+ memory_offset = temp_image_offset;
+ memory_size -= temp_image_offset;
+ temp_image_offset = 0;
+ }
+
+ remaining_bytes = enclave_image_size - image_written_bytes;
+ bytes_to_write = memory_size < remaining_bytes ?
+ memory_size : remaining_bytes;
+
+ memcpy(userspace_addr + memory_offset,
+ enclave_image + image_written_bytes, bytes_to_write);
+
+ image_written_bytes += bytes_to_write;
+
+ if (image_written_bytes == enclave_image_size)
+ break;
+ }
+
+ munmap(enclave_image, enclave_image_size);
+
+ close(enclave_image_fd);
+
+ return 0;
+}
+
+/**
+ * ne_set_user_mem_region() - Set a user space memory region for the given enclave.
+ * @enclave_fd : The file descriptor associated with the enclave.
+ * @ne_user_mem_region : User space memory region to be set for the enclave.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_set_user_mem_region(int enclave_fd, struct ne_user_mem_region ne_user_mem_region)
+{
+ struct ne_user_memory_region mem_region = {
+ .flags = NE_DEFAULT_MEMORY_REGION,
+ .memory_size = ne_user_mem_region.memory_size,
+ .userspace_addr = (__u64)ne_user_mem_region.userspace_addr,
+ };
+ int rc = -EINVAL;
+
+ rc = ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);
+ if (rc < 0) {
+ switch (errno) {
+ case NE_ERR_NOT_IN_INIT_STATE: {
+ printf("Error in set user memory region, enclave not in init state\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_MEM_REGION_SIZE: {
+ printf("Error in set user memory region, mem size not multiple of 2 MiB\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_MEM_REGION_ADDR: {
+ printf("Error in set user memory region, invalid user space address\n");
+
+ break;
+ }
+
+ case NE_ERR_UNALIGNED_MEM_REGION_ADDR: {
+ printf("Error in set user memory region, unaligned user space address\n");
+
+ break;
+ }
+
+ case NE_ERR_MEM_REGION_ALREADY_USED: {
+ printf("Error in set user memory region, memory region already used\n");
+
+ break;
+ }
+
+ case NE_ERR_MEM_NOT_HUGE_PAGE: {
+ printf("Error in set user memory region, not backed by huge pages\n");
+
+ break;
+ }
+
+ case NE_ERR_MEM_DIFFERENT_NUMA_NODE: {
+ printf("Error in set user memory region, different NUMA node than CPUs\n");
+
+ break;
+ }
+
+ case NE_ERR_MEM_MAX_REGIONS: {
+ printf("Error in set user memory region, max memory regions reached\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_PAGE_SIZE: {
+ printf("Error in set user memory region, has page not multiple of 2 MiB\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_FLAG_VALUE: {
+ printf("Error in set user memory region, provided invalid flag\n");
+
+ break;
+ }
+
+ default:
+ printf("Error in set user memory region [%m]\n");
+ }
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_free_mem_regions() - Unmap all the user space memory regions that were set
+ * aside for the enclave.
+ * @ne_user_mem_regions: The user space memory regions associated with an enclave.
+ *
+ * Context: Process context.
+ */
+static void ne_free_mem_regions(struct ne_user_mem_region ne_user_mem_regions[])
+{
+ unsigned int i = 0;
+
+ for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++)
+ munmap(ne_user_mem_regions[i].userspace_addr,
+ ne_user_mem_regions[i].memory_size);
+}
+
+/**
+ * ne_add_vcpu() - Add a vCPU to the given enclave.
+ * @enclave_fd : The file descriptor associated with the enclave.
+ * @vcpu_id: vCPU id to be set for the enclave, either provided or
+ * auto-generated (if provided vCPU id is 0).
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_add_vcpu(int enclave_fd, unsigned int *vcpu_id)
+{
+ int rc = -EINVAL;
+
+ rc = ioctl(enclave_fd, NE_ADD_VCPU, vcpu_id);
+ if (rc < 0) {
+ switch (errno) {
+ case NE_ERR_NO_CPUS_AVAIL_IN_POOL: {
+ printf("Error in add vcpu, no CPUs available in the NE CPU pool\n");
+
+ break;
+ }
+
+ case NE_ERR_VCPU_ALREADY_USED: {
+ printf("Error in add vcpu, the provided vCPU is already used\n");
+
+ break;
+ }
+
+ case NE_ERR_VCPU_NOT_IN_CPU_POOL: {
+ printf("Error in add vcpu, the provided vCPU is not in the NE CPU pool\n");
+
+ break;
+ }
+
+ case NE_ERR_VCPU_INVALID_CPU_CORE: {
+ printf("Error in add vcpu, the core id of the provided vCPU is invalid\n");
+
+ break;
+ }
+
+ case NE_ERR_NOT_IN_INIT_STATE: {
+ printf("Error in add vcpu, enclave not in init state\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_VCPU: {
+ printf("Error in add vcpu, the provided vCPU is out of avail CPUs range\n");
+
+ break;
+ }
+
+ default:
+ printf("Error in add vcpu [%m]\n");
+ }
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_start_enclave() - Start the given enclave.
+ * @enclave_fd : The file descriptor associated with the enclave.
+ * @enclave_start_info : Enclave metadata used for starting, e.g. the vsock CID.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_start_enclave(int enclave_fd, struct ne_enclave_start_info *enclave_start_info)
+{
+ int rc = -EINVAL;
+
+ rc = ioctl(enclave_fd, NE_START_ENCLAVE, enclave_start_info);
+ if (rc < 0) {
+ switch (errno) {
+ case NE_ERR_NOT_IN_INIT_STATE: {
+ printf("Error in start enclave, enclave not in init state\n");
+
+ break;
+ }
+
+ case NE_ERR_NO_MEM_REGIONS_ADDED: {
+ printf("Error in start enclave, no memory regions have been added\n");
+
+ break;
+ }
+
+ case NE_ERR_NO_VCPUS_ADDED: {
+ printf("Error in start enclave, no vCPUs have been added\n");
+
+ break;
+ }
+
+ case NE_ERR_FULL_CORES_NOT_USED: {
+ printf("Error in start enclave, enclave has no full cores set\n");
+
+ break;
+ }
+
+ case NE_ERR_ENCLAVE_MEM_MIN_SIZE: {
+ printf("Error in start enclave, enclave memory is less than min size\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_FLAG_VALUE: {
+ printf("Error in start enclave, provided invalid flag\n");
+
+ break;
+ }
+
+ case NE_ERR_INVALID_ENCLAVE_CID: {
+ printf("Error in start enclave, provided invalid enclave CID\n");
+
+ break;
+ }
+
+ default:
+ printf("Error in start enclave [%m]\n");
+ }
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_start_enclave_check_booted() - Start the enclave and wait for a heartbeat
+ * from it, on a newly created vsock channel,
+ * to check it has booted.
+ * @enclave_fd : The file descriptor associated with the enclave.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_start_enclave_check_booted(int enclave_fd)
+{
+ struct sockaddr_vm client_vsock_addr = {};
+ int client_vsock_fd = -1;
+ socklen_t client_vsock_len = sizeof(client_vsock_addr);
+ struct ne_enclave_start_info enclave_start_info = {};
+ struct pollfd fds[1] = {};
+ int rc = -EINVAL;
+ unsigned char recv_buf = 0;
+ struct sockaddr_vm server_vsock_addr = {
+ .svm_family = AF_VSOCK,
+ .svm_cid = NE_IMAGE_LOAD_HEARTBEAT_CID,
+ .svm_port = NE_IMAGE_LOAD_HEARTBEAT_PORT,
+ };
+ int server_vsock_fd = -1;
+
+ server_vsock_fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ if (server_vsock_fd < 0) {
+ rc = server_vsock_fd;
+
+ printf("Error in socket [%m]\n");
+
+ return rc;
+ }
+
+ rc = bind(server_vsock_fd, (struct sockaddr *)&server_vsock_addr,
+ sizeof(server_vsock_addr));
+ if (rc < 0) {
+ printf("Error in bind [%m]\n");
+
+ goto out;
+ }
+
+ rc = listen(server_vsock_fd, 1);
+ if (rc < 0) {
+ printf("Error in listen [%m]\n");
+
+ goto out;
+ }
+
+ rc = ne_start_enclave(enclave_fd, &enclave_start_info);
+ if (rc < 0)
+ goto out;
+
+ printf("Enclave started, CID %llu\n", enclave_start_info.enclave_cid);
+
+ fds[0].fd = server_vsock_fd;
+ fds[0].events = POLLIN;
+
+ rc = poll(fds, 1, NE_POLL_WAIT_TIME_MS);
+ if (rc < 0) {
+ printf("Error in poll [%m]\n");
+
+ goto out;
+ }
+
+ if (!rc) {
+ printf("Poll timeout, %d seconds elapsed\n", NE_POLL_WAIT_TIME);
+
+ rc = -ETIMEDOUT;
+
+ goto out;
+ }
+
+ if ((fds[0].revents & POLLIN) == 0) {
+ printf("Poll received value %d\n", fds[0].revents);
+
+ rc = -EINVAL;
+
+ goto out;
+ }
+
+ rc = accept(server_vsock_fd, (struct sockaddr *)&client_vsock_addr,
+ &client_vsock_len);
+ if (rc < 0) {
+ printf("Error in accept [%m]\n");
+
+ goto out;
+ }
+
+ client_vsock_fd = rc;
+
+ /*
+ * Read the heartbeat value that the init process in the enclave sends
+ * after vsock connect.
+ */
+ rc = read(client_vsock_fd, &recv_buf, sizeof(recv_buf));
+ if (rc < 0) {
+ printf("Error in read [%m]\n");
+
+ goto out;
+ }
+
+ if (rc != sizeof(recv_buf) || recv_buf != NE_IMAGE_LOAD_HEARTBEAT_VALUE) {
+ printf("Read %d instead of %d\n", recv_buf,
+ NE_IMAGE_LOAD_HEARTBEAT_VALUE);
+
+ rc = -EINVAL;
+
+ goto out;
+ }
+
+ /* Write the heartbeat value back. */
+ rc = write(client_vsock_fd, &recv_buf, sizeof(recv_buf));
+ if (rc < 0) {
+ printf("Error in write [%m]\n");
+
+ goto out;
+ }
+
+ rc = 0;
+
+out:
+ if (client_vsock_fd >= 0)
+ close(client_vsock_fd);
+
+ close(server_vsock_fd);
+
+ return rc;
+}
+
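+/*
+ * Overall sample flow: open the NE device, create an enclave slot, allocate
+ * and set user space memory regions, load the enclave image, add vCPUs, then
+ * start the enclave and wait for its boot heartbeat.
+ */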
+int main(int argc, char *argv[])
+{
+ int enclave_fd = -1;
+ unsigned int i = 0;
+ int ne_dev_fd = -1;
+ struct ne_user_mem_region ne_user_mem_regions[NE_DEFAULT_NR_MEM_REGIONS] = {};
+ unsigned int ne_vcpus[NE_DEFAULT_NR_VCPUS] = {};
+ int rc = -EINVAL;
+ pthread_t thread_id = 0;
+ unsigned long slot_uid = 0;
+
+ if (argc != 2) {
+ printf("Usage: %s <path_to_enclave_image>\n", argv[0]);
+
+ exit(EXIT_FAILURE);
+ }
+
+ if (strlen(argv[1]) >= PATH_MAX) {
+ printf("The size of the path to enclave image is higher than max path\n");
+
+ exit(EXIT_FAILURE);
+ }
+
+ ne_dev_fd = open(NE_DEV_NAME, O_RDWR | O_CLOEXEC);
+ if (ne_dev_fd < 0) {
+ printf("Error in open NE device [%m]\n");
+
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Creating enclave slot ...\n");
+
+ rc = ne_create_vm(ne_dev_fd, &slot_uid, &enclave_fd);
+
+ close(ne_dev_fd);
+
+ if (rc < 0)
+ exit(EXIT_FAILURE);
+
+ printf("Enclave fd %d\n", enclave_fd);
+
+ rc = pthread_create(&thread_id, NULL, ne_poll_enclave_fd, (void *)&enclave_fd);
+ if (rc < 0) {
+ printf("Error in thread create [%m]\n");
+
+ close(enclave_fd);
+
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
+ ne_user_mem_regions[i].memory_size = NE_MIN_MEM_REGION_SIZE;
+
+ rc = ne_alloc_user_mem_region(&ne_user_mem_regions[i]);
+ if (rc < 0) {
+ printf("Error in alloc userspace memory region, iter %d\n", i);
+
+ goto release_enclave_fd;
+ }
+ }
+
+ rc = ne_load_enclave_image(enclave_fd, ne_user_mem_regions, argv[1]);
+ if (rc < 0)
+ goto release_enclave_fd;
+
+ for (i = 0; i < NE_DEFAULT_NR_MEM_REGIONS; i++) {
+ rc = ne_set_user_mem_region(enclave_fd, ne_user_mem_regions[i]);
+ if (rc < 0) {
+ printf("Error in set memory region, iter %d\n", i);
+
+ goto release_enclave_fd;
+ }
+ }
+
+ printf("Enclave memory regions were added\n");
+
+ for (i = 0; i < NE_DEFAULT_NR_VCPUS; i++) {
+ /*
+ * The vCPU is chosen from the enclave vCPU pool, if the value
+ * of the vcpu_id is 0.
+ */
+ ne_vcpus[i] = 0;
+ rc = ne_add_vcpu(enclave_fd, &ne_vcpus[i]);
+ if (rc < 0) {
+ printf("Error in add vcpu, iter %d\n", i);
+
+ goto release_enclave_fd;
+ }
+
+ printf("Added vCPU %d to the enclave\n", ne_vcpus[i]);
+ }
+
+ printf("Enclave vCPUs were added\n");
+
+ rc = ne_start_enclave_check_booted(enclave_fd);
+ if (rc < 0) {
+ printf("Error in the enclave start / image loading heartbeat logic [rc=%d]\n", rc);
+
+ goto release_enclave_fd;
+ }
+
+ printf("Entering sleep for %d seconds ...\n", NE_SLEEP_TIME);
+
+ sleep(NE_SLEEP_TIME);
+
+ close(enclave_fd);
+
+ ne_free_mem_regions(ne_user_mem_regions);
+
+ exit(EXIT_SUCCESS);
+
+release_enclave_fd:
+ close(enclave_fd);
+ ne_free_mem_regions(ne_user_mem_regions);
+
+ exit(EXIT_FAILURE);
+}
diff --git a/samples/pidfd/.gitignore b/samples/pidfd/.gitignore
new file mode 100644
index 000000000..d4cfa3176
--- /dev/null
+++ b/samples/pidfd/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/pidfd-metadata
diff --git a/samples/pidfd/Makefile b/samples/pidfd/Makefile
new file mode 100644
index 000000000..9754e2d81
--- /dev/null
+++ b/samples/pidfd/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += pidfd-metadata
+
+userccflags += -I usr/include
diff --git a/samples/pidfd/pidfd-metadata.c b/samples/pidfd/pidfd-metadata.c
new file mode 100644
index 000000000..c459155da
--- /dev/null
+++ b/samples/pidfd/pidfd-metadata.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#ifndef CLONE_PIDFD
+#define CLONE_PIDFD 0x00001000
+#endif
+
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
+static int do_child(void *args)
+{
+ printf("%d\n", getpid());
+ _exit(EXIT_SUCCESS);
+}
+
+static pid_t pidfd_clone(int flags, int *pidfd)
+{
+ size_t stack_size = 1024;
+ char *stack[1024] = { 0 };
+
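+ /*
+ * clone() expects the topmost address of the child stack on
+ * stack-grows-down architectures, while __clone2() on ia64 takes the
+ * stack base and size instead.
+ */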
+#ifdef __ia64__
+ return __clone2(do_child, stack, stack_size, flags | SIGCHLD, NULL, pidfd);
+#else
+ return clone(do_child, stack + stack_size, flags | SIGCHLD, NULL, pidfd);
+#endif
+}
+
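+/*
+ * Thin wrapper around the pidfd_send_signal() syscall; when the syscall
+ * number is unknown (defined as -1 above), the call fails with ENOSYS.
+ */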
+static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
+ unsigned int flags)
+{
+ return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
+}
+
+static int pidfd_metadata_fd(pid_t pid, int pidfd)
+{
+ int procfd, ret;
+ char path[100];
+
+ snprintf(path, sizeof(path), "/proc/%d", pid);
+ procfd = open(path, O_DIRECTORY | O_RDONLY | O_CLOEXEC);
+ if (procfd < 0) {
+ warn("Failed to open %s\n", path);
+ return -1;
+ }
+
+ /*
+ * Verify that the pid has not been recycled and our /proc/<pid> handle
+ * is still valid.
+ */
+ ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
+ if (ret < 0) {
+ switch (errno) {
+ case EPERM:
+ /* Process exists, just not allowed to signal it. */
+ break;
+ default:
+ warn("Failed to signal process\n");
+ close(procfd);
+ procfd = -1;
+ }
+ }
+
+ return procfd;
+}
+
+int main(int argc, char *argv[])
+{
+ int pidfd = -1, ret = EXIT_FAILURE;
+ char buf[4096] = { 0 };
+ pid_t pid;
+ int procfd, statusfd;
+ ssize_t bytes;
+
+ pid = pidfd_clone(CLONE_PIDFD, &pidfd);
+ if (pid < 0)
+ err(ret, "CLONE_PIDFD");
+ if (pidfd == -1) {
+ warnx("CLONE_PIDFD is not supported by the kernel");
+ goto out;
+ }
+
+ procfd = pidfd_metadata_fd(pid, pidfd);
+ close(pidfd);
+ if (procfd < 0)
+ goto out;
+
+ statusfd = openat(procfd, "status", O_RDONLY | O_CLOEXEC);
+ close(procfd);
+ if (statusfd < 0)
+ goto out;
+
+ bytes = read(statusfd, buf, sizeof(buf));
+ if (bytes > 0)
+ bytes = write(STDOUT_FILENO, buf, bytes);
+ close(statusfd);
+ ret = EXIT_SUCCESS;
+
+out:
+ (void)wait(NULL);
+
+ exit(ret);
+}
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
new file mode 100644
index 000000000..f4adeed5f
--- /dev/null
+++ b/samples/pktgen/README.rst
@@ -0,0 +1,64 @@
+Sample and benchmark scripts for pktgen (packet generator)
+==========================================================
+This directory contains some pktgen sample and benchmark scripts that
+can easily be copied and adjusted for your own use case.
+
+General doc is located in kernel: Documentation/networking/pktgen.rst
+
+Helper include files
+====================
+This directory contains two helper shell files that can be "included"
+by shell sourcing: "functions.sh" and "parameters.sh".
+
+Common parameters
+-----------------
+The parameters.sh file supports easy and consistent parameter parsing
+across the sample scripts. The usage example below is printed on errors::
+
+ Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+ -i : ($DEV) output interface/device (required)
+ -s : ($PKT_SIZE) packet size
+ -d : ($DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed
+ -m : ($DST_MAC) destination MAC-addr
+ -p : ($DST_PORT) destination PORT range (e.g. 433-444) is also allowed
+ -k : ($UDP_CSUM) enable UDP tx checksum
+ -t : ($THREADS) threads to start
+ -f : ($F_THREAD) index of first thread (zero indexed CPU number)
+ -c : ($CLONE_SKB) SKB clones sent before allocating a new SKB
+ -n : ($COUNT) num messages to send per thread, 0 means indefinitely
+ -b : ($BURST) HW level bursting of SKBs
+ -v : ($VERBOSE) verbose
+ -x : ($DEBUG) debug
+ -6 : ($IP6) IPv6
+ -w : ($DELAY) Tx Delay value (ns)
+ -a : ($APPEND) Script will not reset generator's state, but will append its config
+
+The global variable being set is also listed; e.g. the required
+interface/device parameter "-i" sets the variable $DEV.
+
+"-a" parameter may be used to create different flows simultaneously.
+In this mode script will keep the existing config, will append its settings.
+In this mode you'll have to manually run traffic with "pg_ctrl start".
+
+For example you may use:
+
+ source ./samples/pktgen/functions.sh
+ pg_ctrl reset
+ # add first device
+ ./pktgen_sample06_numa_awared_queue_irq_affinity.sh -a -i ens1f0 -m 34:80:0d:a3:fc:c9 -t 8
+ # add second device
+ ./pktgen_sample06_numa_awared_queue_irq_affinity.sh -a -i ens1f1 -m 34:80:0d:a3:fc:c9 -t 8
+ # run joint traffic on two devs
+ pg_ctrl start
+
+Common functions
+----------------
+The functions.sh file provides three different shell functions for
+configuring the different components of pktgen: pg_ctrl(), pg_thread()
+and pg_set().
+
+These functions correspond to pktgen's different components.
+ * pg_ctrl() control "pgctrl" (/proc/net/pktgen/pgctrl)
+ * pg_thread() control the kernel threads and binding to devices
+ * pg_set() control setup of individual devices
+
+See sample scripts for usage examples.
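+
+For instance, a minimal hand-driven session might look as follows (eth0 is
+just a placeholder device; a real config would also set dst_mac, count and
+friends, as the sample scripts do)::
+
+ source ./samples/pktgen/functions.sh
+ pg_thread 0 "rem_device_all"
+ pg_thread 0 "add_device" eth0
+ pg_set eth0 "pkt_size 64"
+ pg_ctrl "start"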
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
new file mode 100644
index 000000000..933194257
--- /dev/null
+++ b/samples/pktgen/functions.sh
@@ -0,0 +1,339 @@
+#
+# Common functions used by pktgen scripts
+# - Depending on bash 3 (or higher) syntax
+#
+# Author: Jesper Dangaard Brouer
+# License: GPL
+
+set -o errexit
+
+## -- General shell logging cmds --
+function err() {
+ local exitcode=$1
+ shift
+ echo "ERROR: $@" >&2
+ exit $exitcode
+}
+
+function warn() {
+ echo "WARN : $@" >&2
+}
+
+function info() {
+ if [[ -n "$VERBOSE" ]]; then
+ echo "INFO : $@" >&2
+ fi
+}
+
+## -- Pktgen proc config commands -- ##
+export PROC_DIR=/proc/net/pktgen
+#
+# Three different shell functions for configuring the different
+# components of pktgen:
+# pg_ctrl(), pg_thread() and pg_set().
+#
+# These functions correspond to pktgen's different components.
+# * pg_ctrl() control "pgctrl" (/proc/net/pktgen/pgctrl)
+# * pg_thread() control the kernel threads and binding to devices
+# * pg_set() control setup of individual devices
+function pg_ctrl() {
+ local proc_file="pgctrl"
+ proc_cmd ${proc_file} "$@"
+}
+
+function pg_thread() {
+ local thread=$1
+ local proc_file="kpktgend_${thread}"
+ shift
+ proc_cmd ${proc_file} "$@"
+}
+
+function pg_set() {
+ local dev=$1
+ local proc_file="$dev"
+ shift
+ proc_cmd ${proc_file} "$@"
+}
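+#
+# Example (hypothetical device eth0, which must already be added to a
+# kernel thread):
+# pg_set eth0 "pkt_size 64"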
+
+# More generic replacement for pgset(), that does not depend on a global
+# variable for the proc file.
+function proc_cmd() {
+ local result
+ local proc_file=$1
+ local status=0
+ # after shift, the remaining args are contained in $@
+ shift
+ local proc_ctrl=${PROC_DIR}/$proc_file
+ if [[ ! -e "$proc_ctrl" ]]; then
+ err 3 "proc file:$proc_ctrl does not exists (dev added to thread?)"
+ else
+ if [[ ! -w "$proc_ctrl" ]]; then
+ err 4 "proc file:$proc_ctrl not writable, not root?!"
+ fi
+ fi
+
+ if [[ "$DEBUG" == "yes" ]]; then
+ echo "cmd: $@ > $proc_ctrl"
+ fi
+ # Quoting of "$@" is important for space expansion
+ echo "$@" > "$proc_ctrl" || status=$?
+
+ if [[ "$proc_file" != "pgctrl" ]]; then
+ result=$(grep "Result: OK:" $proc_ctrl) || true
+ if [[ "$result" == "" ]]; then
+ grep "Result:" $proc_ctrl >&2
+ fi
+ fi
+ if (( $status != 0 )); then
+ err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
+ fi
+}
+
+# Old obsolete "pgset" function, with slightly improved err handling
+function pgset() {
+ local result
+
+ if [[ "$DEBUG" == "yes" ]]; then
+ echo "cmd: $1 > $PGDEV"
+ fi
+ echo $1 > $PGDEV
+ local status=$?
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [[ "$result" == "" ]]; then
+ cat $PGDEV | fgrep Result:
+ fi
+ if (( $status != 0 )); then
+ err 5 "Write error($status) occurred cmd: \"$1 > $PGDEV\""
+ fi
+}
+
+if [[ -z "$APPEND" ]]; then
+ if [[ $EUID -eq 0 ]]; then
+ # Clean up pktgen setup on exit if that's not "append mode"
+ trap 'pg_ctrl "reset"' EXIT
+ fi
+fi
+
+## -- General shell tricks --
+
+function root_check_run_with_sudo() {
+ # Trick so the program can be run as a normal user; it will just
+ # re-run itself via "sudo". Call as: root_check_run_with_sudo "$@"
+ if [ "$EUID" -ne 0 ]; then
+ if [ -x $0 ]; then # Directly executable, use sudo
+ info "Not root, running with sudo"
+ sudo -E "$0" "$@"
+ exit $?
+ fi
+ err 4 "cannot perform sudo run of $0"
+ fi
+}
+
+# Extract the input device's NUMA node info
+function get_iface_node()
+{
+ local node=$(</sys/class/net/$1/device/numa_node)
+ if [[ $node == -1 ]]; then
+ echo 0
+ else
+ echo $node
+ fi
+}
+
+# Given a dev/iface, get its queues' irq numbers
+function get_iface_irqs()
+{
+ local IFACE=$1
+ local queues="${IFACE}-.*TxRx"
+
+ irqs=$(grep "$queues" /proc/interrupts | cut -f1 -d:)
+ [ -z "$irqs" ] && irqs=$(grep $IFACE /proc/interrupts | cut -f1 -d:)
+ [ -z "$irqs" ] && irqs=$(for i in `ls -Ux /sys/class/net/$IFACE/device/msi_irqs` ;\
+ do grep "$i:.*TxRx" /proc/interrupts | grep -v fdir | cut -f 1 -d : ;\
+ done)
+ [ -z "$irqs" ] && err 3 "Could not find interrupts for $IFACE"
+
+ echo $irqs
+}
+
+# Given a NUMA node, return cpu ids belonging to it.
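+# e.g. a cpulist of "0-3,8-11" expands to "0 1 2 3 8 9 10 11"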
+function get_node_cpus()
+{
+ local node=$1
+ local node_cpu_list
+ local node_cpu_range_list=`cut -f1- -d, --output-delimiter=" " \
+ /sys/devices/system/node/node$node/cpulist`
+
+ for cpu_range in $node_cpu_range_list
+ do
+ node_cpu_list="$node_cpu_list "`seq -s " " ${cpu_range//-/ }`
+ done
+
+ echo $node_cpu_list
+}
+
+# Check $1 is in between $2, $3 ($2 <= $1 <= $3)
+function in_between() { [[ ($1 -ge $2) && ($1 -le $3) ]] ; }
+
+# Extend shrunken IPv6 address.
+# fe80::42:bcff:fe84:e10a => fe80:0:0:0:42:bcff:fe84:e10a
+function extend_addr6()
+{
+ local addr=$1
+ local sep=: sep2=::
+ local sep_cnt=$(tr -cd $sep <<< $1 | wc -c)
+ local shrink
+
+ # separator count should be (2 <= $sep_cnt <= 7)
+ if ! (in_between $sep_cnt 2 7); then
+ err 5 "Invalid IP6 address: $1"
+ fi
+
+ # if shrink '::' occurs multiple times, it's malformed.
+ shrink=( $(egrep -o "$sep{2,}" <<< $addr) )
+ if [[ ${#shrink[@]} -ne 0 ]]; then
+ if [[ ${#shrink[@]} -gt 1 || ( ${shrink[0]} != $sep2 ) ]]; then
+ err 5 "Invalid IP6 address: $1"
+ fi
+ fi
+
+ # add 0 at begin & end, and extend addr by adding :0
+ [[ ${addr:0:1} == $sep ]] && addr=0${addr}
+ [[ ${addr: -1} == $sep ]] && addr=${addr}0
+ echo "${addr/$sep2/$(printf ':0%.s' $(seq $[8-sep_cnt])):}"
+}
+
+# Given a single IP(v4/v6) address, check whether it is valid.
+function validate_addr()
+{
+ # check function is called with (funcname)6
+ [[ ${FUNCNAME[1]: -1} == 6 ]] && local IP6=6
+ local bitlen=$[ IP6 ? 128 : 32 ]
+ local len=$[ IP6 ? 8 : 4 ]
+ local max=$[ 2**(len*2)-1 ]
+ local net prefix
+ local addr sep
+
+ IFS='/' read net prefix <<< $1
+ [[ $IP6 ]] && net=$(extend_addr6 $net)
+
+ # if prefix exists, check (0 <= $prefix <= $bitlen)
+ if [[ -n $prefix ]]; then
+ if ! (in_between $prefix 0 $bitlen); then
+ err 5 "Invalid prefix: /$prefix"
+ fi
+ fi
+
+ # set separator for each IP(v4/v6)
+ [[ $IP6 ]] && sep=: || sep=.
+ IFS=$sep read -a addr <<< $net
+
+ # array length
+ if [[ ${#addr[@]} != $len ]]; then
+ err 5 "Invalid IP$IP6 address: $1"
+ fi
+
+ # check each digit (0 <= $digit <= $max)
+ for digit in "${addr[@]}"; do
+ [[ $IP6 ]] && digit=$[ 16#$digit ]
+ if ! (in_between $digit 0 $max); then
+ err 5 "Invalid IP$IP6 address: $1"
+ fi
+ done
+
+ return 0
+}
+
+function validate_addr6() { validate_addr $@ ; }
+
+# Given a single IP(v4/v6) or CIDR, return minimum and maximum IP addr.
+function parse_addr()
+{
+ # check function is called with (funcname)6
+ [[ ${FUNCNAME[1]: -1} == 6 ]] && local IP6=6
+ local net prefix
+ local min_ip max_ip
+
+ IFS='/' read net prefix <<< $1
+ [[ $IP6 ]] && net=$(extend_addr6 $net)
+
+ if [[ -z $prefix ]]; then
+ min_ip=$net
+ max_ip=$net
+ else
+ # defining array for converting Decimal 2 Binary
+ # 00000000 00000001 00000010 00000011 00000100 ...
+ local d2b='{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}{0..1}'
+ [[ $IP6 ]] && d2b+=$d2b
+ eval local D2B=($d2b)
+
+ local bitlen=$[ IP6 ? 128 : 32 ]
+ local remain=$[ bitlen-prefix ]
+ local octet=$[ IP6 ? 16 : 8 ]
+ local min_mask max_mask
+ local min max
+ local ip_bit
+ local ip sep
+
+ # set separator for each IP(v4/v6)
+ [[ $IP6 ]] && sep=: || sep=.
+ IFS=$sep read -ra ip <<< $net
+
+ min_mask="$(printf '1%.s' $(seq $prefix))$(printf '0%.s' $(seq $remain))"
+ max_mask="$(printf '0%.s' $(seq $prefix))$(printf '1%.s' $(seq $remain))"
+
+ # calculate min/max ip with &,| operator
+ for i in "${!ip[@]}"; do
+ digit=$[ IP6 ? 16#${ip[$i]} : ${ip[$i]} ]
+ ip_bit=${D2B[$digit]}
+
+ idx=$[ octet*i ]
+ min[$i]=$[ 2#$ip_bit & 2#${min_mask:$idx:$octet} ]
+ max[$i]=$[ 2#$ip_bit | 2#${max_mask:$idx:$octet} ]
+ [[ $IP6 ]] && { min[$i]=$(printf '%X' ${min[$i]});
+ max[$i]=$(printf '%X' ${max[$i]}); }
+ done
+
+ min_ip=$(IFS=$sep; echo "${min[*]}")
+ max_ip=$(IFS=$sep; echo "${max[*]}")
+ fi
+
+ echo $min_ip $max_ip
+}
+
+function parse_addr6() { parse_addr $@ ; }
+
+# Given a single or range of port(s), return minimum and maximum port number.
+function parse_ports()
+{
+ local port_str=$1
+ local port_list
+ local min_port
+ local max_port
+
+ IFS="-" read -ra port_list <<< $port_str
+
+ min_port=${port_list[0]}
+ max_port=${port_list[1]:-$min_port}
+
+ echo $min_port $max_port
+}
+
+# Given a minimum and maximum port, verify port number.
+function validate_ports()
+{
+ local min_port=$1
+ local max_port=$2
+
+ # 1 <= port <= 65535
+ if (in_between $min_port 1 65535); then
+ if (in_between $max_port 1 65535); then
+ if [[ $min_port -le $max_port ]]; then
+ return 0
+ fi
+ fi
+ fi
+
+ err 5 "Invalid port(s): $min_port-$max_port"
+}
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
new file mode 100644
index 000000000..81906f199
--- /dev/null
+++ b/samples/pktgen/parameters.sh
@@ -0,0 +1,139 @@
+#
+# SPDX-License-Identifier: GPL-2.0
+# Common parameter parsing for pktgen scripts
+#
+
+function usage() {
+ echo ""
+ echo "Usage: $0 [-vx] -i ethX"
+ echo " -i : (\$DEV) output interface/device (required)"
+ echo " -s : (\$PKT_SIZE) packet size"
+ echo " -d : (\$DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed"
+ echo " -m : (\$DST_MAC) destination MAC-addr"
+ echo " -p : (\$DST_PORT) destination PORT range (e.g. 433-444) is also allowed"
+ echo " -k : (\$UDP_CSUM) enable UDP tx checksum"
+ echo " -t : (\$THREADS) threads to start"
+ echo " -f : (\$F_THREAD) index of first thread (zero indexed CPU number)"
+ echo " -c : (\$SKB_CLONE) SKB clones send before alloc new SKB"
+ echo " -n : (\$COUNT) num messages to send per thread, 0 means indefinitely"
+ echo " -b : (\$BURST) HW level bursting of SKBs"
+ echo " -v : (\$VERBOSE) verbose"
+ echo " -x : (\$DEBUG) debug"
+ echo " -6 : (\$IP6) IPv6"
+ echo " -w : (\$DELAY) Tx Delay value (ns)"
+ echo " -a : (\$APPEND) Script will not reset generator's state, but will append its config"
+ echo ""
+}
+
+## --- Parse command line arguments / parameters ---
+## echo "Commandline options:"
+while getopts "s:i:d:m:p:f:t:c:n:b:w:vxh6ak" option; do
+ case $option in
+ i) # interface
+ export DEV=$OPTARG
+ info "Output device set to: DEV=$DEV"
+ ;;
+ s)
+ export PKT_SIZE=$OPTARG
+ info "Packet size set to: PKT_SIZE=$PKT_SIZE bytes"
+ ;;
+ d) # destination IP
+ export DEST_IP=$OPTARG
+ info "Destination IP set to: DEST_IP=$DEST_IP"
+ ;;
+ m) # MAC
+ export DST_MAC=$OPTARG
+ info "Destination MAC set to: DST_MAC=$DST_MAC"
+ ;;
+ p) # PORT
+ export DST_PORT=$OPTARG
+ info "Destination PORT set to: DST_PORT=$DST_PORT"
+ ;;
+ f)
+ export F_THREAD=$OPTARG
+ info "Index of first thread (zero indexed CPU number): $F_THREAD"
+ ;;
+ t)
+ export THREADS=$OPTARG
+ info "Number of threads to start: $THREADS"
+ ;;
+ c)
+ export CLONE_SKB=$OPTARG
+ info "CLONE_SKB=$CLONE_SKB"
+ ;;
+ n)
+ export COUNT=$OPTARG
+ info "COUNT=$COUNT"
+ ;;
+ b)
+ export BURST=$OPTARG
+ info "SKB bursting: BURST=$BURST"
+ ;;
+ w)
+ export DELAY=$OPTARG
+ info "DELAY=$DELAY"
+ ;;
+ v)
+ export VERBOSE=yes
+ info "Verbose mode: VERBOSE=$VERBOSE"
+ ;;
+ x)
+ export DEBUG=yes
+ info "Debug mode: DEBUG=$DEBUG"
+ ;;
+ 6)
+ export IP6=6
+ info "IP6: IP6=$IP6"
+ ;;
+ a)
+ export APPEND=yes
+ info "Append mode: APPEND=$APPEND"
+ ;;
+ k)
+ export UDP_CSUM=yes
+ info "UDP tx checksum: UDP_CSUM=$UDP_CSUM"
+ ;;
+ h|?|*)
+ usage;
+ err 2 "[ERROR] Unknown parameters!!!"
+ esac
+done
+shift $(( $OPTIND - 1 ))
+
+if [ -z "$PKT_SIZE" ]; then
+ # NIC adds 4 bytes CRC
+ export PKT_SIZE=60
+ info "Default packet size set to: set to: $PKT_SIZE bytes"
+fi
+
+if [ -z "$F_THREAD" ]; then
+ # First thread (F_THREAD) references the zero-indexed CPU number
+ export F_THREAD=0
+fi
+
+if [ -z "$THREADS" ]; then
+ export THREADS=1
+fi
+
+# default DELAY
+[ -z "$DELAY" ] && export DELAY=0 # Zero means max speed
+
+export L_THREAD=$(( THREADS + F_THREAD - 1 ))
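+# e.g. "-t 4 -f 2" uses threads 2..5, i.e. L_THREAD = 4 + 2 - 1 = 5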
+
+if [ -z "$DEV" ]; then
+ usage
+ err 2 "Please specify output device"
+fi
+
+if [ -z "$DST_MAC" ]; then
+ warn "Missing destination MAC address"
+fi
+
+if [ -z "$DEST_IP" ]; then
+ warn "Missing destination IP address"
+fi
+
+if [ ! -d /proc/net/pktgen ]; then
+ info "Loading kernel module: pktgen"
+ modprobe pktgen
+fi
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
new file mode 100755
index 000000000..99ec0688b
--- /dev/null
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Benchmark script:
+# - developed for benchmarking ingress qdisc path
+#
+# Script for injecting packets into RX path of the stack with pktgen
+# "xmit_mode netif_receive". With an invalid dst_mac this will only
+# measure the ingress code path as packets get dropped in ip_rcv().
+#
+# This script doesn't really need any hardware. It benchmarks the
+# software RX path just after the NIC driver level. Bursting also
+# "removes" the SKB alloc/free overhead.
+#
+# Setup scenarios for measuring ingress qdisc (with invalid dst_mac):
+# ------------------------------------------------------------------
+# (1) no ingress (uses static_key_false(&ingress_needed))
+#
+# (2) ingress on other dev (change ingress_needed and calls
+# handle_ing() but exit early)
+#
+# config: tc qdisc add dev $SOMEDEV handle ffff: ingress
+#
+# (3) ingress on this dev, handle_ing() -> tc_classify()
+#
+# config: tc qdisc add dev $DEV handle ffff: ingress
+#
+# (4) ingress on this dev + drop at u32 classifier/action.
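+#
+# (one possible config for (4), shown only as an illustration: match
+# every packet with u32 and drop it)
+# config: tc filter add dev $DEV parent ffff: prio 2 u32 \
+# match u32 0 0 flowid 1:1 action drop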
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Using invalid DST_MAC will cause the packets to get dropped in
+# ip_rcv() which is part of the test
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=1024
+[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ # The device name is extended with @name, using thread number to
+ # make them unique, but any name will do.
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config of dev
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ # Inject packet into RX path of stack
+ pg_set $dev "xmit_mode netif_receive"
+
+ # Burst allows us to avoid measuring SKB alloc/free overhead
+ pg_set $dev "burst $BURST"
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+print_result
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
new file mode 100755
index 000000000..04b0dd0c3
--- /dev/null
+++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Benchmark script:
+# - developed for benchmarking egress qdisc path, derived (more
+# like cut'n'pasted) from ingress benchmark script.
+#
+# Script for injecting packets into egress qdisc path of the stack
+# with pktgen "xmit_mode queue_xmit".
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+
+# Bursts greater than 1 are invalid for queue_xmit mode
+if [[ -n "$BURST" ]]; then
+ err 1 "Bursting not supported for this mode"
+fi
+[ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ # The device name is extended with @name, using thread number to
+ # make them unique, but any name will do.
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config of dev
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ # Inject packet into TX qdisc egress path of stack
+ pg_set $dev "xmit_mode queue_xmit"
+done
+
+# Run if user hits control-c
+function print_result {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+print_result
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
new file mode 100755
index 000000000..09a92ea96
--- /dev/null
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Simple example:
+# * pktgen sending with single thread and single interface
+# * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+# - go look in parameters.sh to see which settings are available
+# - required param is the interface "-i" stored in $DEV
+source ${basedir}/parameters.sh
+#
+# Set some default params, if they didn't get set
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+# Example enforce param "-m" for dst_mac
+[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
+[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# Flow variation random source port between min and max
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
+
+# General cleanup of everything since last run
+# (especially important if other threads were configured by other scripts)
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Remove all other devices and add $DEV to thread 0
+thread=0
+[ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+pg_thread $thread "add_device" $DEV
+
+# How many packets to send (zero means indefinitely)
+pg_set $DEV "count $COUNT"
+
+# Reduce alloc cost by sending same SKB many times
+# - this obviously affects the randomness within the packet
+pg_set $DEV "clone_skb $CLONE_SKB"
+
+# Set packet size
+pg_set $DEV "pkt_size $PKT_SIZE"
+
+# Delay between packets (zero means max speed)
+pg_set $DEV "delay $DELAY"
+
+# Flag example disabling timestamping
+pg_set $DEV "flag NO_TIMESTAMP"
+
+# Destination
+pg_set $DEV "dst_mac $DST_MAC"
+pg_set $DEV "dst${IP6}_min $DST_MIN"
+pg_set $DEV "dst${IP6}_max $DST_MAX"
+
+if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $DEV "flag UDPDST_RND"
+ pg_set $DEV "udp_dst_min $UDP_DST_MIN"
+ pg_set $DEV "udp_dst_max $UDP_DST_MAX"
+fi
+
+[ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+# Setup random UDP port src range
+pg_set $DEV "flag UDPSRC_RND"
+pg_set $DEV "udp_src_min $UDP_SRC_MIN"
+pg_set $DEV "udp_src_max $UDP_SRC_MAX"
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ echo "Result device: $DEV"
+ cat /proc/net/pktgen/$DEV
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+if [ -z "$APPEND" ]; then
+ # start_run
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+ echo "Done" >&2
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
fi
\ No newline at end of file
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
new file mode 100755
index 000000000..7fa41c84c
--- /dev/null
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Multiqueue: Using pktgen threads for sending on multiple CPUs
+# * adding devices to kernel threads
+# * notice the naming scheme for keeping device names unique
+# * naming scheme: dev@thread_number
+# * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+#
+# Required param: -i dev in $DEV
+source ${basedir}/parameters.sh
+
+[ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
+
+# Base Config
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+
+# Flow variation random source port between min and max
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
+
+# (example of setting default params in your script)
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ # The device name is extended with @name, using thread number to
+ # make them unique, but any name will do.
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ [ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Notice config queue to map to cpu (mirrors smp_processor_id())
+ # It is beneficial to map IRQ /proc/irq/*/smp_affinity 1:1 to CPU number
+ pg_set $dev "flag QUEUE_MAP_CPU"
+
+ # Base config of dev
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+
+ # Flag example disabling timestamping
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+ # Setup random UDP port src range
+ pg_set $dev "flag UDPSRC_RND"
+ pg_set $dev "udp_src_min $UDP_SRC_MIN"
+ pg_set $dev "udp_src_max $UDP_SRC_MAX"
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+if [ -z "$APPEND" ]; then
+ # start_run
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+ echo "Done" >&2
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
+fi
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
new file mode 100755
index 000000000..8bf2fdffb
--- /dev/null
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Script for max single flow performance
+# - If correctly tuned[1], single CPU 10G wirespeed small pkts is possible[2]
+#
+# Using pktgen "burst" option (use -b $N)
+# - To boost max performance
+# - Avail since: kernel v3.18
+# * commit 38b2cf2982dc73 ("net: pktgen: packet bursting via skb->xmit_more")
+# - This avoids writing the HW tailptr on every driver xmit
+# - The performance boost is impressive, see commit and blog [2]
+#
+# Notice: This on purpose generates a single (UDP) flow towards the
+# target; the reason is to only overload/activate a single CPU on the
+# target host. Avoiding randomness in pktgen also makes it faster.
+#
+# Tuning see:
+# [1] http://netoptimizer.blogspot.dk/2014/06/pktgen-for-network-overload-testing.html
+# [2] http://netoptimizer.blogspot.dk/2014/10/unlocked-10gbps-tx-wirespeed-smallest.html
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=32
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0" # No need for clones when bursting
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ [ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+ # Setup burst; for easy testing, -b 0 disables bursting
+ # (internally in pktgen the default and minimum burst=1)
+ if [[ ${BURST} -ne 0 ]]; then
+ pg_set $dev "burst $BURST"
+ else
+ info "$dev: Not using burst"
+ fi
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+if [ -z "$APPEND" ]; then
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
+fi
diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
new file mode 100755
index 000000000..cff51f861
--- /dev/null
+++ b/samples/pktgen/pktgen_sample04_many_flows.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Script example for many flows testing
+#
+# Number of simultaneous flows limited by variable $FLOWS
+# and number of packets per flow controlled by variable $FLOWLEN
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# NOTICE: Script specific settings
+# =======
+# Limiting the number of concurrent flows ($FLOWS)
+# and also set how many packets each flow contains ($FLOWLEN)
+#
+[ -z "$FLOWS" ] && FLOWS="8000"
+[ -z "$FLOWLEN" ] && FLOWLEN="10"
+
+if [[ -n "$BURST" ]]; then
+ err 1 "Bursting not supported for this mode"
+fi
+
+# 198.18.0.0 / 198.19.255.255
+read -r SRC_MIN SRC_MAX <<< $(parse_addr 198.18.0.0/15)
+
+# General cleanup of everything since last run
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ [ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Single destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+ # Randomize source IP-addresses
+ pg_set $dev "flag IPSRC_RND"
+ pg_set $dev "src_min $SRC_MIN"
+ pg_set $dev "src_max $SRC_MAX"
+
+ # Limit number of flows (max 65535)
+ pg_set $dev "flows $FLOWS"
+ #
+ # How many packets a flow will send, before the flow "entry" is
+ # re-generated/set up.
+ pg_set $dev "flowlen $FLOWLEN"
+ #
+ # Flag FLOW_SEQ will cause $FLOWLEN packets from the same flow
+ # to be sent back-to-back, before the next flow is selected
+ # incrementally. This helps lookup caches, and is more realistic.
+ #
+ pg_set $dev "flag FLOW_SEQ"
+
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+if [ -z "$APPEND" ]; then
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
+fi
diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
new file mode 100755
index 000000000..3578d0aa4
--- /dev/null
+++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Script will generate one flow per thread (-t N)
+# - Same destination IP
+# - Fake source IPs for each flow (fixed based on thread number)
+#
+# Useful for scale testing on receiver, to see whether silo'ing flows
+# works and scales. For optimal scalability (on receiver) each
+# separate-flow should not access shared variables/data. This script
+# helps magnify any of these scaling issues by overloading the receiver.
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+[ -z "$BURST" ] && BURST=32
+[ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ [ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Single destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+ # Setup source IP-addresses based on thread number
+ pg_set $dev "src_min 198.18.$((thread+1)).1"
+ pg_set $dev "src_max 198.18.$((thread+1)).1"
+
+ # Setup burst; for easy testing, -b 0 disables bursting
+ # (internally in pktgen the default and minimum burst=1)
+ if [[ ${BURST} -ne 0 ]]; then
+ pg_set $dev "burst $BURST"
+ else
+ info "$dev: Not using burst"
+ fi
+
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+if [ -z "$APPEND" ]; then
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
+fi
diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
new file mode 100755
index 000000000..264cc5db9
--- /dev/null
+++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+#
+# Multiqueue: Using pktgen threads for sending on multiple CPUs
+# * adding devices to kernel threads which are in the same NUMA node
+# * bind device queues' irq affinity to the threads, 1:1 mapping
+# * notice the naming scheme for keeping device names unique
+# * naming scheme: dev@thread_number
+# * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+#
+# Required param: -i dev in $DEV
+source ${basedir}/parameters.sh
+
+# Base Config
+[ -z "$COUNT" ] && COUNT="20000000" # Zero means indefinitely
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+
+# Flow variation random source port between min and max
+UDP_SRC_MIN=9
+UDP_SRC_MAX=109
+
+node=`get_iface_node $DEV`
+irq_array=(`get_iface_irqs $DEV`)
+cpu_array=(`get_node_cpus $node`)
+
+[ $THREADS -gt ${#irq_array[*]} -o $THREADS -gt ${#cpu_array[*]} ] && \
+ err 1 "Thread number $THREADS exceeds: min (${#irq_array[*]},${#cpu_array[*]})"
+
+# (example of setting default params in your script)
+if [ -z "$DEST_IP" ]; then
+ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
+fi
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+if [ -n "$DEST_IP" ]; then
+ validate_addr${IP6} $DEST_IP
+ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
+fi
+if [ -n "$DST_PORT" ]; then
+ read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
+ validate_ports $UDP_DST_MIN $UDP_DST_MAX
+fi
+
+# General cleanup of everything since last run
+[ -z "$APPEND" ] && pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((i = 0; i < $THREADS; i++)); do
+ # The device name is extended with @name, using thread number to
+ # make them unique, but any name will do.
+ # Set the queue's irq affinity to this $thread (processor)
+ # if '-f' is designated, offset cpu id
+ thread=${cpu_array[$((i+F_THREAD))]}
+ dev=${DEV}@${thread}
+ echo $thread > /proc/irq/${irq_array[$i]}/smp_affinity_list
+ info "irq ${irq_array[$i]} is set affinity to `cat /proc/irq/${irq_array[$i]}/smp_affinity_list`"
+
+ # Remove all other devices and add $dev to this thread
+ [ -z "$APPEND" ] && pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # select queue and bind the queue and $dev in 1:1 relationship
+ queue_num=$i
+ info "queue number is $queue_num"
+ pg_set $dev "queue_map_min $queue_num"
+ pg_set $dev "queue_map_max $queue_num"
+
+ # Notice config queue to map to cpu (mirrors smp_processor_id())
+ # It is beneficial to map IRQ /proc/irq/*/smp_affinity 1:1 to CPU number
+ pg_set $dev "flag QUEUE_MAP_CPU"
+
+ # Base config of dev
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+
+ # Flag example disabling timestamping
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst${IP6}_min $DST_MIN"
+ pg_set $dev "dst${IP6}_max $DST_MAX"
+
+ if [ -n "$DST_PORT" ]; then
+ # Single destination port or random port range
+ pg_set $dev "flag UDPDST_RND"
+ pg_set $dev "udp_dst_min $UDP_DST_MIN"
+ pg_set $dev "udp_dst_max $UDP_DST_MAX"
+ fi
+
+ [ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
+
+ # Setup random UDP port src range
+ pg_set $dev "flag UDPSRC_RND"
+ pg_set $dev "udp_src_min $UDP_SRC_MIN"
+ pg_set $dev "udp_src_max $UDP_SRC_MAX"
+done
+
+# Run if user hits control-c
+function print_result() {
+ # Print results
+ for ((i = 0; i < $THREADS; i++)); do
+ thread=${cpu_array[$((i+F_THREAD))]}
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap true SIGINT
+
+# start_run
+if [ -z "$APPEND" ]; then
+ echo "Running... ctrl^C to stop" >&2
+ pg_ctrl "start"
+ echo "Done" >&2
+
+ print_result
+else
+ echo "Append mode: config done. Do more or use 'pg_ctrl start' to run"
+fi
diff --git a/samples/qmi/Makefile b/samples/qmi/Makefile
new file mode 100644
index 000000000..641943d40
--- /dev/null
+++ b/samples/qmi/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_QMI_CLIENT) += qmi_sample_client.o
diff --git a/samples/qmi/qmi_sample_client.c b/samples/qmi/qmi_sample_client.c
new file mode 100644
index 000000000..c045e3d24
--- /dev/null
+++ b/samples/qmi/qmi_sample_client.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sample in-kernel QMI client driver
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define PING_REQ1_TLV_TYPE 0x1
+#define PING_RESP1_TLV_TYPE 0x2
+#define PING_OPT1_TLV_TYPE 0x10
+#define PING_OPT2_TLV_TYPE 0x11
+
+#define DATA_REQ1_TLV_TYPE 0x1
+#define DATA_RESP1_TLV_TYPE 0x2
+#define DATA_OPT1_TLV_TYPE 0x10
+#define DATA_OPT2_TLV_TYPE 0x11
+
+#define TEST_MED_DATA_SIZE_V01 8192
+#define TEST_MAX_NAME_SIZE_V01 255
+
+#define TEST_PING_REQ_MSG_ID_V01 0x20
+#define TEST_DATA_REQ_MSG_ID_V01 0x21
+
+#define TEST_PING_REQ_MAX_MSG_LEN_V01 266
+#define TEST_DATA_REQ_MAX_MSG_LEN_V01 8456
+
+struct test_name_type_v01 {
+ u32 name_len;
+ char name[TEST_MAX_NAME_SIZE_V01];
+};
+
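+/*
+ * Wire-format description of struct test_name_type_v01: a length field
+ * (name_len) followed by a variable-length array of name characters.
+ */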
+static const struct qmi_elem_info test_name_type_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct test_name_type_v01,
+ name_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = TEST_MAX_NAME_SIZE_V01,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct test_name_type_v01,
+ name),
+ },
+ {}
+};
+
+struct test_ping_req_msg_v01 {
+ char ping[4];
+
+ u8 client_name_valid;
+ struct test_name_type_v01 client_name;
+};
+
+static const struct qmi_elem_info test_ping_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 4,
+ .elem_size = sizeof(char),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = PING_REQ1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_req_msg_v01,
+ ping),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_req_msg_v01,
+ client_name_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct test_name_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_req_msg_v01,
+ client_name),
+ .ei_array = test_name_type_v01_ei,
+ },
+ {}
+};
+
+struct test_ping_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+
+ u8 pong_valid;
+ char pong[4];
+
+ u8 service_name_valid;
+ struct test_name_type_v01 service_name;
+};
+
+static const struct qmi_elem_info test_ping_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_RESP1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_resp_msg_v01,
+ pong_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 4,
+ .elem_size = sizeof(char),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = PING_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_ping_resp_msg_v01,
+ pong),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_OPT2_TLV_TYPE,
+ .offset = offsetof(struct test_ping_resp_msg_v01,
+ service_name_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct test_name_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = PING_OPT2_TLV_TYPE,
+ .offset = offsetof(struct test_ping_resp_msg_v01,
+ service_name),
+ .ei_array = test_name_type_v01_ei,
+ },
+ {}
+};
+
+struct test_data_req_msg_v01 {
+ u32 data_len;
+ u8 data[TEST_MED_DATA_SIZE_V01];
+
+ u8 client_name_valid;
+ struct test_name_type_v01 client_name;
+};
+
+static const struct qmi_elem_info test_data_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_REQ1_TLV_TYPE,
+ .offset = offsetof(struct test_data_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = TEST_MED_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = DATA_REQ1_TLV_TYPE,
+ .offset = offsetof(struct test_data_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_data_req_msg_v01,
+ client_name_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct test_name_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_data_req_msg_v01,
+ client_name),
+ .ei_array = test_name_type_v01_ei,
+ },
+ {}
+};
+
+struct test_data_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+
+ u8 data_valid;
+ u32 data_len;
+ u8 data[TEST_MED_DATA_SIZE_V01];
+
+ u8 service_name_valid;
+ struct test_name_type_v01 service_name;
+};
+
+static const struct qmi_elem_info test_data_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_RESP1_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = TEST_MED_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = DATA_OPT1_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT2_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ service_name_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct test_name_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = DATA_OPT2_TLV_TYPE,
+ .offset = offsetof(struct test_data_resp_msg_v01,
+ service_name),
+ .ei_array = test_name_type_v01_ei,
+ },
+ {}
+};
+
+/*
+ * ping_write() - ping_pong debugfs file write handler
+ * @file: debugfs file context
+ * @user_buf: reference to the user data (ignored)
+ * @count: number of bytes in @user_buf
+ * @ppos: offset in @file to write
+ *
+ * This function allows user space to send out a ping_pong QMI encoded message
+ * to the associated remote test service and will return with the result of the
+ * transaction. It serves as an example of how to provide a custom response
+ * handler.
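+ *
+ * A minimal usage sketch from user space (the node:port directory name is
+ * discovery-dependent; 1:25 here is hypothetical):
+ *
+ *   echo 1 > /sys/kernel/debug/qmi_sample/1:25/ping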
+ *
+ * Return: @count, or negative errno on failure.
+ */
+static ssize_t ping_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct qmi_handle *qmi = file->private_data;
+ struct test_ping_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ memcpy(req.ping, "ping", sizeof(req.ping));
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(qmi, NULL, &txn,
+ TEST_PING_REQ_MSG_ID_V01,
+ TEST_PING_REQ_MAX_MSG_LEN_V01,
+ test_ping_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0)
+		return ret;
+
+ return count;
+}
+
+static const struct file_operations ping_fops = {
+ .open = simple_open,
+ .write = ping_write,
+};
+
+static void ping_pong_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ const struct test_ping_resp_msg_v01 *resp = data;
+
+ if (!txn) {
+ pr_err("spurious ping response\n");
+ return;
+ }
+
+ if (resp->resp.result == QMI_RESULT_FAILURE_V01)
+ txn->result = -ENXIO;
+ else if (!resp->pong_valid || memcmp(resp->pong, "pong", 4))
+ txn->result = -EINVAL;
+
+ complete(&txn->completion);
+}
+
+/*
+ * data_write() - data debugfs file write handler
+ * @file: debugfs file context
+ * @user_buf: reference to the user data
+ * @count: number of bytes in @user_buf
+ * @ppos: offset in @file to write
+ *
+ * This function allows user space to send out a data QMI encoded message to
+ * the associated remote test service and will return with the result of the
+ * transaction. It serves as an example of how to have the QMI helpers decode a
+ * transaction response into a provided object automatically.
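+ *
+ * A minimal usage sketch from user space (hypothetical node:port); the
+ * written payload is echoed back by the remote service and verified:
+ *
+ *   echo hello > /sys/kernel/debug/qmi_sample/1:25/data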
+ *
+ * Return: @count, or negative errno on failure.
+ */
+static ssize_t data_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct qmi_handle *qmi = file->private_data;
+ struct test_data_resp_msg_v01 *resp;
+ struct test_data_req_msg_v01 *req;
+ struct qmi_txn txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->data_len = min_t(size_t, sizeof(req->data), count);
+ if (copy_from_user(req->data, user_buf, req->data_len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = qmi_txn_init(qmi, &txn, test_data_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(qmi, NULL, &txn,
+ TEST_DATA_REQ_MSG_ID_V01,
+ TEST_DATA_REQ_MAX_MSG_LEN_V01,
+ test_data_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ goto out;
+ } else if (!resp->data_valid ||
+ resp->data_len != req->data_len ||
+ memcmp(resp->data, req->data, req->data_len)) {
+ pr_err("response data doesn't match expectation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ kfree(resp);
+ kfree(req);
+
+ return ret;
+}
+
+static const struct file_operations data_fops = {
+ .open = simple_open,
+ .write = data_write,
+};
+
+static const struct qmi_msg_handler qmi_sample_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = TEST_PING_REQ_MSG_ID_V01,
+ .ei = test_ping_resp_msg_v01_ei,
+		.decoded_size = sizeof(struct test_ping_resp_msg_v01),
+ .fn = ping_pong_cb
+ },
+ {}
+};
+
+struct qmi_sample {
+ struct qmi_handle qmi;
+
+ struct dentry *de_dir;
+ struct dentry *de_data;
+ struct dentry *de_ping;
+};
+
+static struct dentry *qmi_debug_dir;
+
+static int qmi_sample_probe(struct platform_device *pdev)
+{
+ struct sockaddr_qrtr *sq;
+ struct qmi_sample *sample;
+ char path[20];
+ int ret;
+
+ sample = devm_kzalloc(&pdev->dev, sizeof(*sample), GFP_KERNEL);
+ if (!sample)
+ return -ENOMEM;
+
+ ret = qmi_handle_init(&sample->qmi, TEST_DATA_REQ_MAX_MSG_LEN_V01,
+ NULL,
+ qmi_sample_handlers);
+ if (ret < 0)
+ return ret;
+
+ sq = dev_get_platdata(&pdev->dev);
+ ret = kernel_connect(sample->qmi.sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret < 0) {
+ pr_err("failed to connect to remote service port\n");
+ goto err_release_qmi_handle;
+ }
+
+ snprintf(path, sizeof(path), "%d:%d", sq->sq_node, sq->sq_port);
+
+ sample->de_dir = debugfs_create_dir(path, qmi_debug_dir);
+ if (IS_ERR(sample->de_dir)) {
+ ret = PTR_ERR(sample->de_dir);
+ goto err_release_qmi_handle;
+ }
+
+ sample->de_data = debugfs_create_file("data", 0600, sample->de_dir,
+ sample, &data_fops);
+ if (IS_ERR(sample->de_data)) {
+ ret = PTR_ERR(sample->de_data);
+ goto err_remove_de_dir;
+ }
+
+ sample->de_ping = debugfs_create_file("ping", 0600, sample->de_dir,
+ sample, &ping_fops);
+ if (IS_ERR(sample->de_ping)) {
+ ret = PTR_ERR(sample->de_ping);
+ goto err_remove_de_data;
+ }
+
+ platform_set_drvdata(pdev, sample);
+
+ return 0;
+
+err_remove_de_data:
+ debugfs_remove(sample->de_data);
+err_remove_de_dir:
+ debugfs_remove(sample->de_dir);
+err_release_qmi_handle:
+ qmi_handle_release(&sample->qmi);
+
+ return ret;
+}
+
+static int qmi_sample_remove(struct platform_device *pdev)
+{
+ struct qmi_sample *sample = platform_get_drvdata(pdev);
+
+ debugfs_remove(sample->de_ping);
+ debugfs_remove(sample->de_data);
+ debugfs_remove(sample->de_dir);
+
+ qmi_handle_release(&sample->qmi);
+
+ return 0;
+}
+
+static struct platform_driver qmi_sample_driver = {
+ .probe = qmi_sample_probe,
+ .remove = qmi_sample_remove,
+ .driver = {
+ .name = "qmi_sample_client",
+ },
+};
+
+static int qmi_sample_new_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct platform_device *pdev;
+ struct sockaddr_qrtr sq = { AF_QIPCRTR, service->node, service->port };
+ int ret;
+
+ pdev = platform_device_alloc("qmi_sample_client", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(pdev, &sq, sizeof(sq));
+ if (ret)
+ goto err_put_device;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto err_put_device;
+
+ service->priv = pdev;
+
+ return 0;
+
+err_put_device:
+ platform_device_put(pdev);
+
+ return ret;
+}
+
+static void qmi_sample_del_server(struct qmi_handle *qmi,
+ struct qmi_service *service)
+{
+ struct platform_device *pdev = service->priv;
+
+ platform_device_unregister(pdev);
+}
+
+static struct qmi_handle lookup_client;
+
+static const struct qmi_ops lookup_ops = {
+ .new_server = qmi_sample_new_server,
+ .del_server = qmi_sample_del_server,
+};
+
+static int qmi_sample_init(void)
+{
+ int ret;
+
+ qmi_debug_dir = debugfs_create_dir("qmi_sample", NULL);
+ if (IS_ERR(qmi_debug_dir)) {
+ pr_err("failed to create qmi_sample dir\n");
+ return PTR_ERR(qmi_debug_dir);
+ }
+
+ ret = platform_driver_register(&qmi_sample_driver);
+ if (ret)
+ goto err_remove_debug_dir;
+
+ ret = qmi_handle_init(&lookup_client, 0, &lookup_ops, NULL);
+ if (ret < 0)
+ goto err_unregister_driver;
+
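+	/* Look up the QMI test service: service id 15, version 0, instance 0 */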
+ qmi_add_lookup(&lookup_client, 15, 0, 0);
+
+ return 0;
+
+err_unregister_driver:
+ platform_driver_unregister(&qmi_sample_driver);
+err_remove_debug_dir:
+ debugfs_remove(qmi_debug_dir);
+
+ return ret;
+}
+
+static void qmi_sample_exit(void)
+{
+ qmi_handle_release(&lookup_client);
+
+ platform_driver_unregister(&qmi_sample_driver);
+
+ debugfs_remove(qmi_debug_dir);
+}
+
+module_init(qmi_sample_init);
+module_exit(qmi_sample_exit);
+
+MODULE_DESCRIPTION("Sample QMI client driver");
+MODULE_LICENSE("GPL v2");
diff --git a/samples/rpmsg/Makefile b/samples/rpmsg/Makefile
new file mode 100644
index 000000000..ddf9a5d13
--- /dev/null
+++ b/samples/rpmsg/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_RPMSG_CLIENT) += rpmsg_client_sample.o
diff --git a/samples/rpmsg/rpmsg_client_sample.c b/samples/rpmsg/rpmsg_client_sample.c
new file mode 100644
index 000000000..ae5081662
--- /dev/null
+++ b/samples/rpmsg/rpmsg_client_sample.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote processor messaging - sample client driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rpmsg.h>
+
+#define MSG "hello world!"
+
+static int count = 100;
+module_param(count, int, 0644);
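+/* Overridable at load time, e.g.: insmod rpmsg_client_sample.ko count=10 */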
+
+struct instance_data {
+ int rx_count;
+};
+
+static int rpmsg_sample_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ int ret;
+ struct instance_data *idata = dev_get_drvdata(&rpdev->dev);
+
+ dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n",
+ ++idata->rx_count, src);
+
+ print_hex_dump_debug(__func__, DUMP_PREFIX_NONE, 16, 1, data, len,
+ true);
+
+ /* samples should not live forever */
+ if (idata->rx_count >= count) {
+ dev_info(&rpdev->dev, "goodbye!\n");
+ return 0;
+ }
+
+ /* send a new message now */
+ ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
+ if (ret)
+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+
+ return 0;
+}
+
+static int rpmsg_sample_probe(struct rpmsg_device *rpdev)
+{
+ int ret;
+ struct instance_data *idata;
+
+ dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
+ rpdev->src, rpdev->dst);
+
+ idata = devm_kzalloc(&rpdev->dev, sizeof(*idata), GFP_KERNEL);
+ if (!idata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&rpdev->dev, idata);
+
+ /* send a message to our remote processor */
+ ret = rpmsg_send(rpdev->ept, MSG, strlen(MSG));
+ if (ret) {
+ dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rpmsg_sample_remove(struct rpmsg_device *rpdev)
+{
+ dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n");
+}
+
+static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = {
+ { .name = "rpmsg-client-sample" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table);
+
+static struct rpmsg_driver rpmsg_sample_client = {
+ .drv.name = KBUILD_MODNAME,
+ .id_table = rpmsg_driver_sample_id_table,
+ .probe = rpmsg_sample_probe,
+ .callback = rpmsg_sample_cb,
+ .remove = rpmsg_sample_remove,
+};
+module_rpmsg_driver(rpmsg_sample_client);
+
+MODULE_DESCRIPTION("Remote processor messaging sample client driver");
+MODULE_LICENSE("GPL v2");
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
new file mode 100644
index 000000000..841e0906e
--- /dev/null
+++ b/samples/rust/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig SAMPLES_RUST
+ bool "Rust samples"
+ depends on RUST
+ help
+ You can build sample Rust kernel code here.
+
+ If unsure, say N.
+
+if SAMPLES_RUST
+
+config SAMPLE_RUST_MINIMAL
+ tristate "Minimal"
+ help
+ This option builds the Rust minimal module sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_minimal.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_HOSTPROGS
+ bool "Host programs"
+ help
+ This option builds the Rust host program samples.
+
+ If unsure, say N.
+
+endif # SAMPLES_RUST
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
new file mode 100644
index 000000000..1daba5f86
--- /dev/null
+++ b/samples/rust/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o
+
+subdir-$(CONFIG_SAMPLE_RUST_HOSTPROGS) += hostprogs
diff --git a/samples/rust/hostprogs/.gitignore b/samples/rust/hostprogs/.gitignore
new file mode 100644
index 000000000..a6c173da5
--- /dev/null
+++ b/samples/rust/hostprogs/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+single
diff --git a/samples/rust/hostprogs/Makefile b/samples/rust/hostprogs/Makefile
new file mode 100644
index 000000000..8ddcbd741
--- /dev/null
+++ b/samples/rust/hostprogs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+hostprogs-always-y := single
+
+single-rust := y
diff --git a/samples/rust/hostprogs/a.rs b/samples/rust/hostprogs/a.rs
new file mode 100644
index 000000000..f7a4a3d0f
--- /dev/null
+++ b/samples/rust/hostprogs/a.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample: module `a`.
+
+pub(crate) fn f(x: i32) {
+ println!("The number is {}.", x);
+}
diff --git a/samples/rust/hostprogs/b.rs b/samples/rust/hostprogs/b.rs
new file mode 100644
index 000000000..c16758906
--- /dev/null
+++ b/samples/rust/hostprogs/b.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample: module `b`.
+
+pub(crate) const CONSTANT: i32 = 42;
diff --git a/samples/rust/hostprogs/single.rs b/samples/rust/hostprogs/single.rs
new file mode 100644
index 000000000..8c48a1193
--- /dev/null
+++ b/samples/rust/hostprogs/single.rs
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample.
+
+mod a;
+mod b;
+
+fn main() {
+ println!("Hello world!");
+
+ a::f(b::CONSTANT);
+}
diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs
new file mode 100644
index 000000000..54ad17685
--- /dev/null
+++ b/samples/rust/rust_minimal.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust minimal sample.
+
+use kernel::prelude::*;
+
+module! {
+ type: RustMinimal,
+ name: b"rust_minimal",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust minimal sample",
+ license: b"GPL",
+}
+
+struct RustMinimal {
+ numbers: Vec<i32>,
+}
+
+impl kernel::Module for RustMinimal {
+ fn init(_module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust minimal sample (init)\n");
+ pr_info!("Am I built-in? {}\n", !cfg!(MODULE));
+
+ let mut numbers = Vec::new();
+ numbers.try_push(72)?;
+ numbers.try_push(108)?;
+ numbers.try_push(200)?;
+
+ Ok(RustMinimal { numbers })
+ }
+}
+
+impl Drop for RustMinimal {
+ fn drop(&mut self) {
+ pr_info!("My numbers are {:?}\n", self.numbers);
+ pr_info!("Rust minimal sample (exit)\n");
+ }
+}
diff --git a/samples/seccomp/.gitignore b/samples/seccomp/.gitignore
new file mode 100644
index 000000000..a6df0da77
--- /dev/null
+++ b/samples/seccomp/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/bpf-direct
+/bpf-fancy
+/dropper
+/user-trap
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
new file mode 100644
index 000000000..c85ae0ed8
--- /dev/null
+++ b/samples/seccomp/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += bpf-fancy dropper bpf-direct user-trap
+
+bpf-fancy-objs := bpf-fancy.o bpf-helper.o
+
+userccflags += -I usr/include
diff --git a/samples/seccomp/bpf-direct.c b/samples/seccomp/bpf-direct.c
new file mode 100644
index 000000000..c09e4a17a
--- /dev/null
+++ b/samples/seccomp/bpf-direct.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Seccomp filter example for x86 (32-bit and 64-bit) with BPF macros
+ *
+ * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ * Author: Will Drewry <wad@chromium.org>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using prctl(PR_SET_SECCOMP, 2, ...).
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#define SUPPORTED_ARCH 1
+#endif
+
+#if defined(SUPPORTED_ARCH)
+#define __USE_GNU 1
+#define _GNU_SOURCE 1
+
+#include <linux/types.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <linux/unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
+#define syscall_nr (offsetof(struct seccomp_data, nr))
+
+#if defined(__i386__)
+#define REG_RESULT REG_EAX
+#define REG_SYSCALL REG_EAX
+#define REG_ARG0 REG_EBX
+#define REG_ARG1 REG_ECX
+#define REG_ARG2 REG_EDX
+#define REG_ARG3 REG_ESI
+#define REG_ARG4 REG_EDI
+#define REG_ARG5 REG_EBP
+#elif defined(__x86_64__)
+#define REG_RESULT REG_RAX
+#define REG_SYSCALL REG_RAX
+#define REG_ARG0 REG_RDI
+#define REG_ARG1 REG_RSI
+#define REG_ARG2 REG_RDX
+#define REG_ARG3 REG_R10
+#define REG_ARG4 REG_R8
+#define REG_ARG5 REG_R9
+#endif
+
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS 38
+#endif
+
+#ifndef SYS_SECCOMP
+#define SYS_SECCOMP 1
+#endif
+
+static void emulator(int nr, siginfo_t *info, void *void_context)
+{
+ ucontext_t *ctx = (ucontext_t *)(void_context);
+ int syscall;
+ char *buf;
+ ssize_t bytes;
+ size_t len;
+ if (info->si_code != SYS_SECCOMP)
+ return;
+ if (!ctx)
+ return;
+ syscall = ctx->uc_mcontext.gregs[REG_SYSCALL];
+ buf = (char *) ctx->uc_mcontext.gregs[REG_ARG1];
+ len = (size_t) ctx->uc_mcontext.gregs[REG_ARG2];
+
+ if (syscall != __NR_write)
+ return;
+ if (ctx->uc_mcontext.gregs[REG_ARG0] != STDERR_FILENO)
+ return;
+	/* Redirect stderr messages to stdout. Doesn't handle EINTR, etc. */
+ ctx->uc_mcontext.gregs[REG_RESULT] = -1;
+ if (write(STDOUT_FILENO, "[ERR] ", 6) > 0) {
+ bytes = write(STDOUT_FILENO, buf, len);
+ ctx->uc_mcontext.gregs[REG_RESULT] = bytes;
+ }
+ return;
+}
+
+static int install_emulator(void)
+{
+ struct sigaction act;
+ sigset_t mask;
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGSYS);
+
+ act.sa_sigaction = &emulator;
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSYS, &act, NULL) < 0) {
+ perror("sigaction");
+ return -1;
+ }
+ if (sigprocmask(SIG_UNBLOCK, &mask, NULL)) {
+ perror("sigprocmask");
+ return -1;
+ }
+ return 0;
+}
+
+static int install_filter(void)
+{
+ struct sock_filter filter[] = {
+ /* Grab the system call number */
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_nr),
+ /* Jump table for the allowed syscalls */
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_rt_sigreturn, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+#ifdef __NR_sigreturn
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_sigreturn, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+#endif
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_exit_group, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_exit, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_read, 1, 0),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_write, 3, 2),
+
+ /* Check that read is only using stdin. */
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_arg(0)),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDIN_FILENO, 4, 0),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
+
+ /* Check that write is only using stdout */
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, syscall_arg(0)),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDOUT_FILENO, 1, 0),
+ /* Trap attempts to write to stderr */
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, STDERR_FILENO, 1, 2),
+
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_TRAP),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
+ };
+ struct sock_fprog prog = {
+ .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+ .filter = filter,
+ };
+
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ perror("prctl(NO_NEW_PRIVS)");
+ return 1;
+ }
+
+ if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
+ perror("prctl");
+ return 1;
+ }
+ return 0;
+}
+
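+/* Note: sizeof on a string literal includes the trailing NUL, so it is written too. */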
+#define payload(_c) (_c), sizeof((_c))
+int main(int argc, char **argv)
+{
+ char buf[4096];
+ ssize_t bytes = 0;
+ if (install_emulator())
+ return 1;
+ if (install_filter())
+ return 1;
+ syscall(__NR_write, STDOUT_FILENO,
+ payload("OHAI! WHAT IS YOUR NAME? "));
+ bytes = syscall(__NR_read, STDIN_FILENO, buf, sizeof(buf));
+ syscall(__NR_write, STDOUT_FILENO, payload("HELLO, "));
+ syscall(__NR_write, STDOUT_FILENO, buf, bytes);
+ syscall(__NR_write, STDERR_FILENO,
+ payload("Error message going to STDERR\n"));
+ return 0;
+}
+#else /* SUPPORTED_ARCH */
+/*
+ * This sample is x86-only. Since kernel samples are compiled with the
+ * host toolchain, a non-x86 host will result in using only the main()
+ * below.
+ */
+int main(void)
+{
+ return 1;
+}
+#endif /* SUPPORTED_ARCH */
diff --git a/samples/seccomp/bpf-fancy.c b/samples/seccomp/bpf-fancy.c
new file mode 100644
index 000000000..1ccb43502
--- /dev/null
+++ b/samples/seccomp/bpf-fancy.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Seccomp BPF example using a macro-based generator.
+ *
+ * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ * Author: Will Drewry <wad@chromium.org>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using prctl(PR_ATTACH_SECCOMP_FILTER).
+ */
+
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+#include "bpf-helper.h"
+
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS 38
+#endif
+
+int main(int argc, char **argv)
+{
+ struct bpf_labels l = {
+ .count = 0,
+ };
+ static const char msg1[] = "Please type something: ";
+ static const char msg2[] = "You typed: ";
+ char buf[256];
+ struct sock_filter filter[] = {
+ /* TODO: LOAD_SYSCALL_NR(arch) and enforce an arch */
+ LOAD_SYSCALL_NR,
+ SYSCALL(__NR_exit, ALLOW),
+ SYSCALL(__NR_exit_group, ALLOW),
+ SYSCALL(__NR_write, JUMP(&l, write_fd)),
+ SYSCALL(__NR_read, JUMP(&l, read)),
+ DENY, /* Don't passthrough into a label */
+
+ LABEL(&l, read),
+ ARG(0),
+ JNE(STDIN_FILENO, DENY),
+ ARG(1),
+ JNE((unsigned long)buf, DENY),
+ ARG(2),
+ JGE(sizeof(buf), DENY),
+ ALLOW,
+
+ LABEL(&l, write_fd),
+ ARG(0),
+ JEQ(STDOUT_FILENO, JUMP(&l, write_buf)),
+ JEQ(STDERR_FILENO, JUMP(&l, write_buf)),
+ DENY,
+
+ LABEL(&l, write_buf),
+ ARG(1),
+ JEQ((unsigned long)msg1, JUMP(&l, msg1_len)),
+ JEQ((unsigned long)msg2, JUMP(&l, msg2_len)),
+ JEQ((unsigned long)buf, JUMP(&l, buf_len)),
+ DENY,
+
+ LABEL(&l, msg1_len),
+ ARG(2),
+ JLT(sizeof(msg1), ALLOW),
+ DENY,
+
+ LABEL(&l, msg2_len),
+ ARG(2),
+ JLT(sizeof(msg2), ALLOW),
+ DENY,
+
+ LABEL(&l, buf_len),
+ ARG(2),
+ JLT(sizeof(buf), ALLOW),
+ DENY,
+ };
+ struct sock_fprog prog = {
+ .filter = filter,
+ .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+ };
+ ssize_t bytes;
+ bpf_resolve_jumps(&l, filter, sizeof(filter)/sizeof(*filter));
+
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ perror("prctl(NO_NEW_PRIVS)");
+ return 1;
+ }
+
+ if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
+ perror("prctl(SECCOMP)");
+ return 1;
+ }
+ syscall(__NR_write, STDOUT_FILENO, msg1, strlen(msg1));
+ bytes = syscall(__NR_read, STDIN_FILENO, buf, sizeof(buf)-1);
+ bytes = (bytes > 0 ? bytes : 0);
+ syscall(__NR_write, STDERR_FILENO, msg2, strlen(msg2));
+ syscall(__NR_write, STDERR_FILENO, buf, bytes);
+ /* Now get killed */
+ syscall(__NR_write, STDERR_FILENO, msg2, strlen(msg2)+2);
+ return 0;
+}
diff --git a/samples/seccomp/bpf-helper.c b/samples/seccomp/bpf-helper.c
new file mode 100644
index 000000000..ae260d77a
--- /dev/null
+++ b/samples/seccomp/bpf-helper.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Seccomp BPF helper functions
+ *
+ * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ * Author: Will Drewry <wad@chromium.org>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using prctl(PR_ATTACH_SECCOMP_FILTER).
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "bpf-helper.h"
+
+int bpf_resolve_jumps(struct bpf_labels *labels,
+ struct sock_filter *filter, size_t count)
+{
+ size_t i;
+
+ if (count < 1 || count > BPF_MAXINSNS)
+ return -1;
+ /*
+ * Walk it once, backwards, to build the label table and do fixups.
+ * Since backward jumps are disallowed by BPF, this is easy.
+ */
+ for (i = 0; i < count; ++i) {
+ size_t offset = count - i - 1;
+ struct sock_filter *instr = &filter[offset];
+ if (instr->code != (BPF_JMP+BPF_JA))
+ continue;
+ switch ((instr->jt<<8)|instr->jf) {
+ case (JUMP_JT<<8)|JUMP_JF:
+ if (labels->labels[instr->k].location == 0xffffffff) {
+ fprintf(stderr, "Unresolved label: '%s'\n",
+ labels->labels[instr->k].label);
+ return 1;
+ }
+ instr->k = labels->labels[instr->k].location -
+ (offset + 1);
+ instr->jt = 0;
+ instr->jf = 0;
+ continue;
+ case (LABEL_JT<<8)|LABEL_JF:
+ if (labels->labels[instr->k].location != 0xffffffff) {
+ fprintf(stderr, "Duplicate label use: '%s'\n",
+ labels->labels[instr->k].label);
+ return 1;
+ }
+ labels->labels[instr->k].location = offset;
+ instr->k = 0; /* fall through */
+ instr->jt = 0;
+ instr->jf = 0;
+ continue;
+ }
+ }
+ return 0;
+}
+
+/* Simple lookup table for labels. */
+__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label)
+{
+ struct __bpf_label *begin = labels->labels, *end;
+ int id;
+
+ if (labels->count == BPF_LABELS_MAX) {
+ fprintf(stderr, "Too many labels\n");
+ exit(1);
+ }
+ if (labels->count == 0) {
+ begin->label = label;
+ begin->location = 0xffffffff;
+ labels->count++;
+ return 0;
+ }
+ end = begin + labels->count;
+ for (id = 0; begin < end; ++begin, ++id) {
+ if (!strcmp(label, begin->label))
+ return id;
+ }
+ begin->label = label;
+ begin->location = 0xffffffff;
+ labels->count++;
+ return id;
+}
+
+void seccomp_bpf_print(struct sock_filter *filter, size_t count)
+{
+ struct sock_filter *end = filter + count;
+ for ( ; filter < end; ++filter)
+ printf("{ code=%u,jt=%u,jf=%u,k=%u },\n",
+ filter->code, filter->jt, filter->jf, filter->k);
+}
diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
new file mode 100644
index 000000000..417e48a4c
--- /dev/null
+++ b/samples/seccomp/bpf-helper.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Example wrapper around BPF macros.
+ *
+ * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ * Author: Will Drewry <wad@chromium.org>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using prctl(PR_SET_SECCOMP, 2, ...).
+ *
+ * No guarantees are provided with respect to the correctness
+ * or functionality of this code.
+ */
+#ifndef __BPF_HELPER_H__
+#define __BPF_HELPER_H__
+
+#include <asm/bitsperlong.h> /* for __BITS_PER_LONG */
+#include <endian.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h> /* for seccomp_data */
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <stddef.h>
+
+#define BPF_LABELS_MAX 256
+struct bpf_labels {
+ int count;
+ struct __bpf_label {
+ const char *label;
+ __u32 location;
+ } labels[BPF_LABELS_MAX];
+};
+
+int bpf_resolve_jumps(struct bpf_labels *labels,
+ struct sock_filter *filter, size_t count);
+__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
+void seccomp_bpf_print(struct sock_filter *filter, size_t count);
+
+#define JUMP_JT 0xff
+#define JUMP_JF 0xff
+#define LABEL_JT 0xfe
+#define LABEL_JF 0xfe
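+
+/*
+ * JUMP and LABEL emit BPF_JA instructions whose jt/jf fields carry the
+ * magic values above and whose k holds a label id; bpf_resolve_jumps()
+ * later rewrites k with the real jump offset (for JUMP) or records the
+ * instruction's position (for LABEL), clearing jt/jf in both cases.
+ */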
+
+#define ALLOW \
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
+#define DENY \
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
+#define JUMP(labels, label) \
+ BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
+ JUMP_JT, JUMP_JF)
+#define LABEL(labels, label) \
+ BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
+ LABEL_JT, LABEL_JF)
+#define SYSCALL(nr, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
+ jt
+
+/* Lame, but just an example */
+#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
+
+#define EXPAND(...) __VA_ARGS__
+
+/* Ensure that we load the logically correct offset. */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
+#else
+#error "Unknown endianness"
+#endif
+
+/* Map all width-sensitive operations */
+#if __BITS_PER_LONG == 32
+
+#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
+#define JNE(x, jt) JNE32(x, EXPAND(jt))
+#define JGT(x, jt) JGT32(x, EXPAND(jt))
+#define JLT(x, jt) JLT32(x, EXPAND(jt))
+#define JGE(x, jt) JGE32(x, EXPAND(jt))
+#define JLE(x, jt) JLE32(x, EXPAND(jt))
+#define JA(x, jt) JA32(x, EXPAND(jt))
+#define ARG(i) ARG_32(i)
+
+#elif __BITS_PER_LONG == 64
+
+/* Ensure that we load the logically correct offset. */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define ENDIAN(_lo, _hi) _lo, _hi
+#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ENDIAN(_lo, _hi) _hi, _lo
+#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
+#endif
+
+union arg64 {
+ struct {
+ __u32 ENDIAN(lo32, hi32);
+ };
+ __u64 u64;
+};
+
+#define JEQ(x, jt) \
+ JEQ64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define JGT(x, jt) \
+ JGT64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define JGE(x, jt) \
+ JGE64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define JNE(x, jt) \
+ JNE64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define JLT(x, jt) \
+ JLT64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define JLE(x, jt) \
+ JLE64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+
+#define JA(x, jt) \
+ JA64(((union arg64){.u64 = (x)}).lo32, \
+ ((union arg64){.u64 = (x)}).hi32, \
+ EXPAND(jt))
+#define ARG(i) ARG_64(i)
+
+#else
+#error __BITS_PER_LONG value unusable.
+#endif
+
+/* Loads the arg into A */
+#define ARG_32(idx) \
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
+
+/* Loads lo into M[0] and hi into M[1] and A */
+#define ARG_64(idx) \
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
+ BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
+ BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
+
+#define JEQ32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
+ jt
+
+#define JNE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
+ jt
+
+#define JA32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+ jt
+
+#define JGT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+ jt
+
+#define JLE32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+ jt
+
+#define JLT32(value, jt) \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+ jt
+
+/*
+ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
+ * A and M[1]. This invariant is kept by restoring A if necessary.
+ */
+#define JEQ64(lo, hi, jt) \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+ /* if (lo != arg.lo) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JNE64(lo, hi, jt) \
+ /* if (hi != arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo != arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JA64(lo, hi, jt) \
+ /* if (hi & arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo & arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JGE64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo >= arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JGT64(lo, hi, jt) \
+ /* if (hi > arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo > arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLE64(lo, hi, jt) \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo <= arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLT64(lo, hi, jt) \
+ /* if (hi < arg.hi) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+ /* if (hi != arg.hi) goto NOMATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+ BPF_STMT(BPF_LD+BPF_MEM, 0), \
+ /* if (lo < arg.lo) goto MATCH; */ \
+ BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
+ BPF_STMT(BPF_LD+BPF_MEM, 1), \
+ jt, \
+ BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define LOAD_SYSCALL_NR \
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
+ offsetof(struct seccomp_data, nr))
+
+#endif /* __BPF_HELPER_H__ */
diff --git a/samples/seccomp/dropper.c b/samples/seccomp/dropper.c
new file mode 100644
index 000000000..4bca4b70f
--- /dev/null
+++ b/samples/seccomp/dropper.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Naive system call dropper built on seccomp_filter.
+ *
+ * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ * Author: Will Drewry <wad@chromium.org>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using prctl(PR_SET_SECCOMP, 2, ...).
+ *
+ * When run, returns the specified errno for the specified
+ * system call number against the given architecture.
+ *
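+ * For example, on x86-64 (AUDIT_ARCH_X86_64 == 0xc000003e and
+ * __NR_getpid == 39), the following makes getpid() fail with EPERM
+ * (errno 1) inside the launched program:
+ *
+ *   ./dropper 0xc000003e 39 1 /bin/sh
+ *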
+ */
+
+#include <errno.h>
+#include <linux/audit.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <sys/prctl.h>
+#include <unistd.h>
+
+static int install_filter(int arch, int nr, int error)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
+ (offsetof(struct seccomp_data, arch))),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, arch, 0, 3),
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
+ (offsetof(struct seccomp_data, nr))),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K,
+ SECCOMP_RET_ERRNO|(error & SECCOMP_RET_DATA)),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog prog = {
+ .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+ .filter = filter,
+ };
+ if (error == -1) {
+ struct sock_filter kill = BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL);
+ filter[4] = kill;
+ }
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ perror("prctl(NO_NEW_PRIVS)");
+ return 1;
+ }
+ if (prctl(PR_SET_SECCOMP, 2, &prog)) {
+ perror("prctl(PR_SET_SECCOMP)");
+ return 1;
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ if (argc < 5) {
+ fprintf(stderr, "Usage:\n"
+ "dropper <arch> <syscall_nr> <errno> <prog> [<args>]\n"
+ "Hint: AUDIT_ARCH_I386: 0x%X\n"
+ " AUDIT_ARCH_X86_64: 0x%X\n"
+ " errno == -1 means SECCOMP_RET_KILL\n"
+ "\n", AUDIT_ARCH_I386, AUDIT_ARCH_X86_64);
+ return 1;
+ }
+ if (install_filter(strtol(argv[1], NULL, 0), strtol(argv[2], NULL, 0),
+ strtol(argv[3], NULL, 0)))
+ return 1;
+ execv(argv[4], &argv[4]);
+ printf("Failed to execv\n");
+ return 255;
+}
diff --git a/samples/seccomp/user-trap.c b/samples/seccomp/user-trap.c
new file mode 100644
index 000000000..20291ec64
--- /dev/null
+++ b/samples/seccomp/user-trap.c
@@ -0,0 +1,375 @@
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stddef.h>
+#include <sys/sysmacros.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/user.h>
+#include <sys/ioctl.h>
+#include <sys/ptrace.h>
+#include <sys/mount.h>
+#include <linux/limits.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
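+/* glibc provides no seccomp(2) wrapper, so invoke it via syscall(2). */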
+static int seccomp(unsigned int op, unsigned int flags, void *args)
+{
+ errno = 0;
+ return syscall(__NR_seccomp, op, flags, args);
+}
+
+static int send_fd(int sock, int fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c';
+ struct iovec io = {
+ .iov_base = &c,
+ .iov_len = 1,
+ };
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = buf;
+ msg.msg_controllen = sizeof(buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ *((int *)CMSG_DATA(cmsg)) = fd;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ if (sendmsg(sock, &msg, 0) < 0) {
+ perror("sendmsg");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int recv_fd(int sock)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c';
+ struct iovec io = {
+ .iov_base = &c,
+ .iov_len = 1,
+ };
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = buf;
+ msg.msg_controllen = sizeof(buf);
+
+ if (recvmsg(sock, &msg, 0) < 0) {
+ perror("recvmsg");
+ return -1;
+ }
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+
+ return *((int *)CMSG_DATA(cmsg));
+}
+
+static int user_trap_syscall(int nr, unsigned int flags)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
+ offsetof(struct seccomp_data, nr)),
+ BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_USER_NOTIF),
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ };
+
+ struct sock_fprog prog = {
+ .len = (unsigned short)ARRAY_SIZE(filter),
+ .filter = filter,
+ };
+
+ return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
+}
+
+static int handle_req(struct seccomp_notif *req,
+ struct seccomp_notif_resp *resp, int listener)
+{
+ char path[PATH_MAX], source[PATH_MAX], target[PATH_MAX];
+ int ret = -1, mem;
+
+ resp->id = req->id;
+ resp->error = -EPERM;
+ resp->val = 0;
+
+ if (req->data.nr != __NR_mount) {
+ fprintf(stderr, "huh? trapped something besides mount? %d\n", req->data.nr);
+ return -1;
+ }
+
+ /* Only allow bind mounts. */
+ if (!(req->data.args[3] & MS_BIND))
+ return 0;
+
+ /*
+ * Ok, let's read the task's memory to see where they wanted their
+ * mount to go.
+ */
+ snprintf(path, sizeof(path), "/proc/%d/mem", req->pid);
+ mem = open(path, O_RDONLY);
+ if (mem < 0) {
+ perror("open mem");
+ return -1;
+ }
+
+ /*
+	 * Now we avoid a TOCTOU: we referred to a task by its pid, but since
+	 * the task that made the syscall may have died, we need to confirm
+	 * that the pid is still valid after we open its /proc/pid/mem file.
+	 * We can ask the listener fd this as follows.
+ *
+ * Note that this check should occur *after* any task-specific
+ * resources are opened, to make sure that the task has not died and
+ * we're not wrongly reading someone else's state in order to make
+ * decisions.
+ */
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req->id) < 0) {
+ fprintf(stderr, "task died before we could map its memory\n");
+ goto out;
+ }
+
+ /*
+ * Phew, we've got the right /proc/pid/mem. Now we can read it. Note
+ * that to avoid another TOCTOU, we should read all of the pointer args
+ * before we decide to allow the syscall.
+ */
+ if (lseek(mem, req->data.args[0], SEEK_SET) < 0) {
+ perror("seek");
+ goto out;
+ }
+
+ ret = read(mem, source, sizeof(source));
+ if (ret < 0) {
+ perror("read");
+ goto out;
+ }
+
+ if (lseek(mem, req->data.args[1], SEEK_SET) < 0) {
+ perror("seek");
+ goto out;
+ }
+
+ ret = read(mem, target, sizeof(target));
+ if (ret < 0) {
+ perror("read");
+ goto out;
+ }
+
+ /*
+ * Our policy is to only allow bind mounts inside /tmp. This isn't very
+	 * interesting, because we could do unprivileged bind mounts with user
+ * namespaces already, but you get the idea.
+ */
+ if (!strncmp(source, "/tmp/", 5) && !strncmp(target, "/tmp/", 5)) {
+ if (mount(source, target, NULL, req->data.args[3], NULL) < 0) {
+ ret = -1;
+ perror("actual mount");
+ goto out;
+ }
+ resp->error = 0;
+ }
+
+	/*
+	 * Even if we didn't allow it because of policy, generating the
+	 * response was a success, because we want to tell the worker EPERM.
+	 */
+ ret = 0;
+
+out:
+ close(mem);
+ return ret;
+}
+
+int main(void)
+{
+ int sk_pair[2], ret = 1, status, listener;
+	pid_t worker = 0, tracer = 0;
+
+ if (socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair) < 0) {
+ perror("socketpair");
+ return 1;
+ }
+
+ worker = fork();
+ if (worker < 0) {
+ perror("fork");
+ goto close_pair;
+ }
+
+ if (worker == 0) {
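+		/*
+		 * With SECCOMP_FILTER_FLAG_NEW_LISTENER, a successful
+		 * seccomp() returns a notification fd rather than 0.
+		 */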
+ listener = user_trap_syscall(__NR_mount,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ if (listener < 0) {
+ perror("seccomp");
+ exit(1);
+ }
+
+ /*
+ * Drop privileges. We definitely can't mount as uid 1000.
+ */
+ if (setuid(1000) < 0) {
+ perror("setuid");
+ exit(1);
+ }
+
+ /*
+ * Send the listener to the parent; also serves as
+ * synchronization.
+ */
+ if (send_fd(sk_pair[1], listener) < 0)
+ exit(1);
+ close(listener);
+
+ if (mkdir("/tmp/foo", 0755) < 0) {
+ perror("mkdir");
+ exit(1);
+ }
+
+ /*
+ * Try a bad mount just for grins.
+ */
+ if (mount("/dev/sda", "/tmp/foo", NULL, 0, NULL) != -1) {
+ fprintf(stderr, "huh? mounted /dev/sda?\n");
+ exit(1);
+ }
+
+ if (errno != EPERM) {
+ perror("bad error from mount");
+ exit(1);
+ }
+
+ /*
+ * Ok, we expect this one to succeed.
+ */
+ if (mount("/tmp/foo", "/tmp/foo", NULL, MS_BIND, NULL) < 0) {
+ perror("mount");
+ exit(1);
+ }
+
+ exit(0);
+ }
+
+ /*
+ * Get the listener from the child.
+ */
+ listener = recv_fd(sk_pair[0]);
+ if (listener < 0)
+ goto out_kill;
+
+ /*
+ * Fork a task to handle the requests. This isn't strictly necessary,
+	 * but it keeps the structure of this sample simple, since we
+	 * can just wait for the tracee to exit and kill the tracer.
+ */
+ tracer = fork();
+ if (tracer < 0) {
+ perror("fork");
+ goto out_kill;
+ }
+
+ if (tracer == 0) {
+ struct seccomp_notif *req;
+ struct seccomp_notif_resp *resp;
+ struct seccomp_notif_sizes sizes;
+
+ if (seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes) < 0) {
+ perror("seccomp(GET_NOTIF_SIZES)");
+ goto out_close;
+ }
+
+ req = malloc(sizes.seccomp_notif);
+ if (!req)
+ goto out_close;
+
+ resp = malloc(sizes.seccomp_notif_resp);
+ if (!resp)
+ goto out_req;
+ memset(resp, 0, sizes.seccomp_notif_resp);
+
+ while (1) {
+ memset(req, 0, sizes.seccomp_notif);
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, req)) {
+ perror("ioctl recv");
+ goto out_resp;
+ }
+
+ if (handle_req(req, resp, listener) < 0)
+ goto out_resp;
+
+ /*
+ * ENOENT here means that the task may have gotten a
+ * signal and restarted the syscall. It's up to the
+ * handler to decide what to do in this case, but for
+ * the sample code, we just ignore it. Probably
+ * something better should happen, like undoing the
+ * mount, or keeping track of the args to make sure we
+ * don't do it again.
+ */
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, resp) < 0 &&
+ errno != ENOENT) {
+ perror("ioctl send");
+ goto out_resp;
+ }
+ }
+out_resp:
+ free(resp);
+out_req:
+ free(req);
+out_close:
+ close(listener);
+ exit(1);
+ }
+
+ close(listener);
+
+ if (waitpid(worker, &status, 0) != worker) {
+ perror("waitpid");
+ goto out_kill;
+ }
+
+ if (umount2("/tmp/foo", MNT_DETACH) < 0 && errno != EINVAL) {
+ perror("umount2");
+ goto out_kill;
+ }
+
+ if (remove("/tmp/foo") < 0 && errno != ENOENT) {
+ perror("remove");
+ exit(1);
+ }
+
+ if (!WIFEXITED(status) || WEXITSTATUS(status)) {
+ fprintf(stderr, "worker exited nonzero\n");
+ goto out_kill;
+ }
+
+ ret = 0;
+
+out_kill:
+ if (tracer > 0)
+ kill(tracer, SIGKILL);
+ if (worker > 0)
+ kill(worker, SIGKILL);
+
+close_pair:
+ close(sk_pair[0]);
+ close(sk_pair[1]);
+ return ret;
+}
diff --git a/samples/timers/.gitignore b/samples/timers/.gitignore
new file mode 100644
index 000000000..cd9ff7b95
--- /dev/null
+++ b/samples/timers/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/hpet_example
diff --git a/samples/timers/Makefile b/samples/timers/Makefile
new file mode 100644
index 000000000..e6836cdea
--- /dev/null
+++ b/samples/timers/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += hpet_example
+
+userccflags += -I usr/include
diff --git a/samples/timers/hpet_example.c b/samples/timers/hpet_example.c
new file mode 100644
index 000000000..f1cb622f6
--- /dev/null
+++ b/samples/timers/hpet_example.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <memory.h>
+#include <malloc.h>
+#include <time.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <linux/hpet.h>
+
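+/*
+ * Usage sketch (assuming the kernel exposes an HPET character device,
+ * typically /dev/hpet):
+ *
+ *   ./hpet_example info /dev/hpet
+ *   ./hpet_example poll /dev/hpet 10 100
+ */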
+
+extern void hpet_open_close(int, const char **);
+extern void hpet_info(int, const char **);
+extern void hpet_poll(int, const char **);
+extern void hpet_fasync(int, const char **);
+extern void hpet_read(int, const char **);
+
+#include <sys/poll.h>
+#include <sys/ioctl.h>
+
+struct hpet_command {
+ char *command;
+ void (*func)(int argc, const char ** argv);
+} hpet_command[] = {
+ {
+ "open-close",
+ hpet_open_close
+ },
+ {
+ "info",
+ hpet_info
+ },
+ {
+ "poll",
+ hpet_poll
+ },
+ {
+ "fasync",
+ hpet_fasync
+ },
+};
+
+int
+main(int argc, const char ** argv)
+{
+ unsigned int i;
+
+ argc--;
+ argv++;
+
+ if (!argc) {
+ fprintf(stderr, "-hpet: requires command\n");
+ return -1;
+ }
+
+ for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++)
+ if (!strcmp(argv[0], hpet_command[i].command)) {
+ argc--;
+ argv++;
+ fprintf(stderr, "-hpet: executing %s\n",
+ hpet_command[i].command);
+ hpet_command[i].func(argc, argv);
+ return 0;
+ }
+
+ fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]);
+
+ return -1;
+}
+
+void
+hpet_open_close(int argc, const char **argv)
+{
+ int fd;
+
+ if (argc != 1) {
+ fprintf(stderr, "hpet_open_close: device-name\n");
+ return;
+ }
+
+ fd = open(argv[0], O_RDONLY);
+ if (fd < 0)
+ fprintf(stderr, "hpet_open_close: open failed\n");
+ else
+ close(fd);
+
+ return;
+}
+
+void
+hpet_info(int argc, const char **argv)
+{
+ struct hpet_info info;
+ int fd;
+
+ if (argc != 1) {
+ fprintf(stderr, "hpet_info: device-name\n");
+ return;
+ }
+
+ fd = open(argv[0], O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "hpet_info: open of %s failed\n", argv[0]);
+ return;
+ }
+
+ if (ioctl(fd, HPET_INFO, &info) < 0) {
+ fprintf(stderr, "hpet_info: failed to get info\n");
+ goto out;
+ }
+
+ fprintf(stderr, "hpet_info: hi_irqfreq 0x%lx hi_flags 0x%lx ",
+ info.hi_ireqfreq, info.hi_flags);
+ fprintf(stderr, "hi_hpet %d hi_timer %d\n",
+ info.hi_hpet, info.hi_timer);
+
+out:
+ close(fd);
+ return;
+}
+
+void
+hpet_poll(int argc, const char **argv)
+{
+ unsigned long freq;
+ int iterations, i, fd;
+ struct pollfd pfd;
+ struct hpet_info info;
+ struct timeval stv, etv;
+ struct timezone tz;
+ long usec;
+
+ if (argc != 3) {
+ fprintf(stderr, "hpet_poll: device-name freq iterations\n");
+ return;
+ }
+
+ freq = atoi(argv[1]);
+ iterations = atoi(argv[2]);
+
+ fd = open(argv[0], O_RDONLY);
+
+ if (fd < 0) {
+ fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]);
+ return;
+ }
+
+ if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
+ fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n");
+ goto out;
+ }
+
+ if (ioctl(fd, HPET_INFO, &info) < 0) {
+ fprintf(stderr, "hpet_poll: failed to get info\n");
+ goto out;
+ }
+
+ fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags);
+
+ if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
+ fprintf(stderr, "hpet_poll: HPET_EPI failed\n");
+ goto out;
+ }
+
+ if (ioctl(fd, HPET_IE_ON, 0) < 0) {
+ fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n");
+ goto out;
+ }
+
+ pfd.fd = fd;
+ pfd.events = POLLIN;
+
+ for (i = 0; i < iterations; i++) {
+ pfd.revents = 0;
+ gettimeofday(&stv, &tz);
+ if (poll(&pfd, 1, -1) < 0)
+ fprintf(stderr, "hpet_poll: poll failed\n");
+ else {
+ long data;
+
+ gettimeofday(&etv, &tz);
+ usec = stv.tv_sec * 1000000 + stv.tv_usec;
+ usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec;
+
+ fprintf(stderr,
+ "hpet_poll: expired time = 0x%lx\n", usec);
+
+ fprintf(stderr, "hpet_poll: revents = 0x%x\n",
+ pfd.revents);
+
+			if (read(fd, &data, sizeof(data)) != sizeof(data))
+				fprintf(stderr, "hpet_poll: read failed\n");
+			else
+				fprintf(stderr, "hpet_poll: data 0x%lx\n",
+					data);
+ }
+ }
+
+out:
+ close(fd);
+ return;
+}
+
+static int hpet_sigio_count;
+
+static void
+hpet_sigio(int val)
+{
+ fprintf(stderr, "hpet_sigio: called\n");
+ hpet_sigio_count++;
+}
+
+void
+hpet_fasync(int argc, const char **argv)
+{
+ unsigned long freq;
+ int iterations, i, fd, value;
+ sig_t oldsig;
+ struct hpet_info info;
+
+ hpet_sigio_count = 0;
+ fd = -1;
+
+ if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) {
+ fprintf(stderr, "hpet_fasync: failed to set signal handler\n");
+ return;
+ }
+
+ if (argc != 3) {
+ fprintf(stderr, "hpet_fasync: device-name freq iterations\n");
+ goto out;
+ }
+
+ fd = open(argv[0], O_RDONLY);
+
+ if (fd < 0) {
+ fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]);
+ return;
+ }
+
+	if ((fcntl(fd, F_SETOWN, getpid()) == -1) ||
+	    ((value = fcntl(fd, F_GETFL)) == -1) ||
+	    (fcntl(fd, F_SETFL, value | O_ASYNC) == -1)) {
+ fprintf(stderr, "hpet_fasync: fcntl failed\n");
+ goto out;
+ }
+
+ freq = atoi(argv[1]);
+ iterations = atoi(argv[2]);
+
+ if (ioctl(fd, HPET_IRQFREQ, freq) < 0) {
+ fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n");
+ goto out;
+ }
+
+ if (ioctl(fd, HPET_INFO, &info) < 0) {
+ fprintf(stderr, "hpet_fasync: failed to get info\n");
+ goto out;
+ }
+
+ fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags);
+
+ if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) {
+ fprintf(stderr, "hpet_fasync: HPET_EPI failed\n");
+ goto out;
+ }
+
+ if (ioctl(fd, HPET_IE_ON, 0) < 0) {
+ fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n");
+ goto out;
+ }
+
+ for (i = 0; i < iterations; i++) {
+ (void) pause();
+ fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count);
+ }
+
+out:
+ signal(SIGIO, oldsig);
+
+ if (fd >= 0)
+ close(fd);
+
+ return;
+}
diff --git a/samples/trace_events/Makefile b/samples/trace_events/Makefile
new file mode 100644
index 000000000..b3808bb4c
--- /dev/null
+++ b/samples/trace_events/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# builds the trace events example kernel modules;
+# then to use one (as root): insmod <module_name.ko>
+
+# If you include a trace header outside of include/trace/events
+# then the file that does the #define CREATE_TRACE_POINTS must
+# have that tracer file in its main search path. This is because
+# define_trace.h will include it, and must be able to find it from
+# the include/trace directory.
+#
+# Here trace-events-sample.c does the CREATE_TRACE_POINTS.
+#
+CFLAGS_trace-events-sample.o := -I$(src)
+CFLAGS_trace_custom_sched.o := -I$(src)
+
+obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace-events-sample.o
+obj-$(CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS) += trace_custom_sched.o
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
new file mode 100644
index 000000000..608c4ae3b
--- /dev/null
+++ b/samples/trace_events/trace-events-sample.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+
+/*
+ * Any file that uses trace points must include the header.
+ * But only one file must include the header by defining
+ * CREATE_TRACE_POINTS first. This will generate the C code that
+ * creates the handles for the trace points.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace-events-sample.h"
+
+static const char *random_strings[] = {
+ "Mother Goose",
+ "Snoopy",
+ "Gandalf",
+ "Frodo",
+ "One ring to rule them all"
+};
+
+static void do_simple_thread_func(int cnt, const char *fmt, ...)
+{
+ unsigned long bitmask[1] = {0xdeadbeefUL};
+ va_list va;
+ int array[6];
+ int len = cnt % 5;
+ int i;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ for (i = 0; i < len; i++)
+ array[i] = i + 1;
+ array[i] = 0;
+
+ va_start(va, fmt);
+
+ /* Silly tracepoints */
+ trace_foo_bar("hello", cnt, array, random_strings[len],
+ current->cpus_ptr, fmt, &va);
+
+ va_end(va);
+
+ trace_foo_with_template_simple("HELLO", cnt);
+
+ trace_foo_bar_with_cond("Some times print", cnt);
+
+ trace_foo_with_template_cond("prints other times", cnt);
+
+ trace_foo_with_template_print("I have to be different", cnt);
+
+ trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask);
+}
+
+static void simple_thread_func(int cnt)
+{
+ do_simple_thread_func(cnt, "iter=%d", cnt);
+}
+
+static int simple_thread(void *arg)
+{
+ int cnt = 0;
+
+ while (!kthread_should_stop())
+ simple_thread_func(cnt++);
+
+ return 0;
+}
+
+static struct task_struct *simple_tsk;
+static struct task_struct *simple_tsk_fn;
+
+static void simple_thread_func_fn(int cnt)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /* More silly tracepoints */
+ trace_foo_bar_with_fn("Look at me", cnt);
+ trace_foo_with_template_fn("Look at me too", cnt);
+}
+
+static int simple_thread_fn(void *arg)
+{
+ int cnt = 0;
+
+ while (!kthread_should_stop())
+ simple_thread_func_fn(cnt++);
+
+ return 0;
+}
+
+static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
+
+int foo_bar_reg(void)
+{
+ mutex_lock(&thread_mutex);
+ if (simple_thread_cnt++)
+ goto out;
+
+ pr_info("Starting thread for foo_bar_fn\n");
+ /*
+ * We shouldn't be able to start a trace when the module is
+	 * unloading (there are other locks to prevent that). But
+	 * for consistency's sake, we still take the thread_mutex.
+ */
+ simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
+ mutex_unlock(&thread_mutex);
+ return 0;
+}
+
+void foo_bar_unreg(void)
+{
+ mutex_lock(&thread_mutex);
+ if (--simple_thread_cnt)
+ goto out;
+
+ pr_info("Killing thread for foo_bar_fn\n");
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ out:
+ mutex_unlock(&thread_mutex);
+}
+
+static int __init trace_event_init(void)
+{
+ simple_tsk = kthread_run(simple_thread, NULL, "event-sample");
+ if (IS_ERR(simple_tsk))
+ return -1;
+
+ return 0;
+}
+
+static void __exit trace_event_exit(void)
+{
+ kthread_stop(simple_tsk);
+ mutex_lock(&thread_mutex);
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
+ mutex_unlock(&thread_mutex);
+}
+
+module_init(trace_event_init);
+module_exit(trace_event_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("trace-events-sample");
+MODULE_LICENSE("GPL");
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
new file mode 100644
index 000000000..1a9222620
--- /dev/null
+++ b/samples/trace_events/trace-events-sample.h
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sample-trace.h
+ *
+ * If the header name will be different than the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called trace-events-sample.h but we want the system
+ * to be called "sample-trace". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE trace-events-sample
+ *
+ * As we do at the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sample-trace
+
+/*
+ * TRACE_SYSTEM is expected to be a valid C variable name (alpha-numeric
+ * characters and underscores), although it may start with numbers. If
+ * for some reason it is not, you need to add the following lines:
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR sample_trace
+/*
+ * But the above is only needed if TRACE_SYSTEM is not made up solely of
+ * alpha-numeric characters and underscores. By default, TRACE_SYSTEM_VAR
+ * will be equal to TRACE_SYSTEM. As TRACE_SYSTEM_VAR must be a valid C
+ * variable name, if TRACE_SYSTEM is not, then TRACE_SYSTEM_VAR must be
+ * defined with only alpha-numeric characters and underscores.
+ *
+ * The TRACE_SYSTEM_VAR is only used internally and not visible to
+ * user space.
+ */
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_TRACE_EVENT_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_SAMPLE_H
+
+/*
+ * All trace headers should include tracepoint.h, until we finally
+ * make it into a standard header.
+ */
+#include <linux/tracepoint.h>
+
+/*
+ * The TRACE_EVENT macro is broken up into 5 parts.
+ *
+ * name: name of the trace point. This is also how to enable the tracepoint.
+ * A function called trace_foo_bar() will be created.
+ *
+ * proto: the prototype of the function trace_foo_bar()
+ * Here it is trace_foo_bar(char *foo, int bar).
+ *
+ * args: must match the arguments in the prototype.
+ * Here it is simply "foo, bar".
+ *
+ * struct: This defines the way the data will be stored in the ring buffer.
+ * The items declared here become part of a special structure
+ * called "__entry", which can be used in the fast_assign part of the
+ * TRACE_EVENT macro.
+ *
+ * Here are the currently defined types you can use:
+ *
+ * __field : Is broken up into type and name, where type can be any
+ * primitive type (integer, long or pointer).
+ *
+ * __field(int, foo)
+ *
+ * __entry->foo = 5;
+ *
+ * __field_struct : This can be any static complex data type (struct or
+ * union, but not an array). Be careful using complex types, as each
+ * event is limited in size, and copying large amounts of data
+ * into the ring buffer can slow things down.
+ *
+ * __field_struct(struct bar, foo)
+ *
+ * __entry->bar.x = y;
+ *
+ * __array: There are three fields (type, name, size). The type is the
+ * type of the elements in the array, the name is the name of the array,
+ * and size is the number of items in the array (not the total size in
+ * bytes).
+ *
+ * __array( char, foo, 10) is the same as saying: char foo[10];
+ *
+ * Assigning arrays can be done like any array:
+ *
+ * __entry->foo[0] = 'a';
+ *
+ * memcpy(__entry->foo, bar, 10);
+ *
+ * __dynamic_array: This is similar to array, but can vary its size from
+ * instance to instance of the tracepoint being called.
+ * Like __array, this too has three elements (type, name, size);
+ * type is the type of the element, name is the name of the array.
+ * The size is different from __array. It is not a static number,
+ * but an expression that computes the length of the array for the
+ * specific instance of the tracepoint. Again, size is the number of
+ * items in the array, not the total length in bytes.
+ *
+ * __dynamic_array( int, foo, bar) is similar to: int foo[bar];
+ *
+ * Note, unlike arrays, you must use the __get_dynamic_array() macro
+ * to access the array.
+ *
+ * memcpy(__get_dynamic_array(foo), bar, 10);
+ *
+ * Notice, that "__entry" is not needed here.
+ *
+ * __string: This is a special kind of __dynamic_array. It expects to
+ * have a null terminated character array passed to it (it allows
+ * for NULL too, which would be converted into "(null)"). __string
+ * takes two parameters (name, src), where name is the name of
+ * the string saved, and src is the string to copy into the
+ * ring buffer.
+ *
+ * __string(foo, bar) is similar to: strcpy(foo, bar)
+ *
+ * To assign a string, use the helper macro __assign_str().
+ *
+ * __assign_str(foo, bar);
+ *
+ * In most cases, the __assign_str() macro will take the same
+ * parameters as the __string() macro had to declare the string.
+ *
+ * __vstring: This is similar to __string() but instead of taking a
+ * string directly, it takes a printf-style format and a va_list 'va'
+ * variable. Some event callers already have the message parameters saved
+ * in a va_list. Passing in the format and the va_list variable
+ * will save just enough on the ring buffer for that string.
+ * Note, the va variable used is a pointer to a va_list, not
+ * to the va_list directly.
+ *
+ * (va_list *va)
+ *
+ * __vstring(foo, fmt, va) is similar to: vsnprintf(foo, fmt, va)
+ *
+ * To assign the string, use the helper macro __assign_vstr().
+ *
+ * __assign_vstr(foo, fmt, va);
+ *
+ * In most cases, the __assign_vstr() macro will take the same
+ * parameters as the __vstring() macro had to declare the string.
+ * Use __get_str() to retrieve the __vstring() just like it would for
+ * __string().
+ *
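+ * A minimal, hypothetical wrapper for such a caller (matching the
+ * foo_bar event prototype defined below in this file) could look
+ * like:
+ *
+ *	static void do_trace(const char *fmt, ...)
+ *	{
+ *		va_list va;
+ *
+ *		va_start(va, fmt);
+ *		trace_foo_bar("vstring demo", 1, NULL, "str",
+ *			      cpu_online_mask, fmt, &va);
+ *		va_end(va);
+ *	}
+ *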
+ * __string_len: This is a helper to a __dynamic_array, but it understands
+ * that the array has characters in it, and with the combined
+ * use of __assign_str_len(), it will allocate 'len' + 1 bytes
+ * in the ring buffer and add a '\0' to the string. This is
+ * useful if the string being saved has no terminating '\0' byte.
+ * It requires that the length of the string is known as it acts
+ * like a memcpy().
+ *
+ * Declared with:
+ *
+ * __string_len(foo, bar, len)
+ *
+ * To assign this string, use the helper macro __assign_str_len().
+ *
+ * __assign_str_len(foo, bar, len);
+ *
+ * Then len + 1 is allocated to the ring buffer, and a nul terminating
+ * byte is added. This is similar to:
+ *
+ * memcpy(__get_str(foo), bar, len);
+ * __get_str(foo)[len] = 0;
+ *
+ * The advantage of using this over __dynamic_array is that it
+ * takes care of allocating the extra byte on the ring buffer
+ * for the '\0' terminating byte, and __get_str(foo) can be used
+ * in the TP_printk().
+ *
+ * __bitmask: This is another kind of __dynamic_array, but it expects
+ * an array of longs, and the number of bits to parse. It takes
+ * two parameters (name, nr_bits), where name is the name of the
+ * bitmask to save, and the nr_bits is the number of bits to record.
+ *
+ * __bitmask(target_cpu, nr_cpumask_bits)
+ *
+ * To assign a bitmask, use the __assign_bitmask() helper macro.
+ *
+ * __assign_bitmask(target_cpus, cpumask_bits(bar), nr_cpumask_bits);
+ *
+ *
+ * fast_assign: This is a C like function that is used to store the items
+ * into the ring buffer. A special variable called "__entry" will be the
+ * structure that points into the ring buffer and has the same fields as
+ * described by the struct part of TRACE_EVENT above.
+ *
+ * printk: This is a way to print out the data in pretty print. This is
+ * useful if the system crashes and you are logging via a serial line:
+ * the data can be printed to the console using this "printk" method.
+ * This is also used to print out the data from the trace files.
+ * Again, the __entry macro is used to access the data from the ring buffer.
+ *
+ * Note, __dynamic_array, __string, and __bitmask require special helpers
+ * to access the data.
+ *
+ * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
+ * Use __get_dynamic_array_len(foo) to get the length of the array
+ * saved. Note, __get_dynamic_array_len() returns the total allocated
+ * length of the dynamic array; __print_array() expects the second
+ * parameter to be the number of elements. To get that, the array length
+ * needs to be divided by the element size.
+ *
+ * For __string(foo, bar) use __get_str(foo)
+ *
+ * For __bitmask(target_cpus, nr_cpumask_bits) use __get_bitmask(target_cpus)
+ *
+ *
+ * Note that for both the assign and the printk, __entry is the handle
+ * to the data structure in the ring buffer, and is defined by the
+ * TP_STRUCT__entry.
+ */
+
+/*
+ * It is OK to have helper functions in the file, but they need to be protected
+ * from being defined more than once. Remember, this file gets included more
+ * than once.
+ */
+#ifndef __TRACE_EVENT_SAMPLE_HELPER_FUNCTIONS
+#define __TRACE_EVENT_SAMPLE_HELPER_FUNCTIONS
+static inline int __length_of(const int *list)
+{
+ int i;
+
+ if (!list)
+ return 0;
+
+ for (i = 0; list[i]; i++)
+ ;
+ return i;
+}
+
+enum {
+ TRACE_SAMPLE_FOO = 2,
+ TRACE_SAMPLE_BAR = 4,
+ TRACE_SAMPLE_ZOO = 8,
+};
+#endif
+
+/*
+ * If enums are used in the TP_printk(), their names will be shown in
+ * format files and not their values. This can cause problems with user
+ * space programs that parse the format files to know how to translate
+ * the raw binary trace output into human readable text.
+ *
+ * To help out user space programs, any enum that is used in the TP_printk()
+ * should be defined with the TRACE_DEFINE_ENUM() macro. All that needs
+ * to be done is to add this macro with the enum within it in the trace
+ * header file, and it will be converted in the output.
+ */
+
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_FOO);
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_BAR);
+TRACE_DEFINE_ENUM(TRACE_SAMPLE_ZOO);
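+
+/*
+ * With the above, the event's "format" file will show the resolved
+ * numbers, e.g. { 2, "TWO" } instead of { TRACE_SAMPLE_FOO, "TWO" },
+ * so user space parsers can translate the raw values without knowing
+ * the enum definitions.
+ */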
+
+TRACE_EVENT(foo_bar,
+
+ TP_PROTO(const char *foo, int bar, const int *lst,
+ const char *string, const struct cpumask *mask,
+ const char *fmt, va_list *va),
+
+ TP_ARGS(foo, bar, lst, string, mask, fmt, va),
+
+ TP_STRUCT__entry(
+ __array( char, foo, 10 )
+ __field( int, bar )
+ __dynamic_array(int, list, __length_of(lst))
+ __string( str, string )
+ __bitmask( cpus, num_possible_cpus() )
+ __vstring( vstr, fmt, va )
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->foo, foo, 10);
+ __entry->bar = bar;
+ memcpy(__get_dynamic_array(list), lst,
+ __length_of(lst) * sizeof(int));
+ __assign_str(str, string);
+ __assign_vstr(vstr, fmt, va);
+ __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
+ ),
+
+ TP_printk("foo %s %d %s %s %s %s (%s) %s", __entry->foo, __entry->bar,
+
+/*
+ * Notice here the use of some helper functions. This includes:
+ *
+ * __print_symbolic( variable, { value, "string" }, ... ),
+ *
+ * The variable is tested against each value of the { } pair. If
+ * the variable matches one of the values, then it will print the
+ * string in that pair. If none are matched, it returns a string
+ * version of the number (if __entry->bar == 7 then "7" is returned).
+ */
+ __print_symbolic(__entry->bar,
+ { 0, "zero" },
+ { TRACE_SAMPLE_FOO, "TWO" },
+ { TRACE_SAMPLE_BAR, "FOUR" },
+ { TRACE_SAMPLE_ZOO, "EIGHT" },
+ { 10, "TEN" }
+ ),
+
+/*
+ * __print_flags( variable, "delim", { value, "flag" }, ... ),
+ *
+ * This is similar to __print_symbolic, except that it tests the bits
+ * of the value. If ((FLAG & variable) == FLAG) then the string is
+ * printed. If more than one flag matches, then each one that does is
+ * also printed with delim in between them.
+ * If not all bits are accounted for, then the not found bits will be
+ * added in hex format: 0x506 will show BIT2|BIT3|0x500
+ */
+ __print_flags(__entry->bar, "|",
+ { 1, "BIT1" },
+ { 2, "BIT2" },
+ { 4, "BIT3" },
+ { 8, "BIT4" }
+ ),
+/*
+ * __print_array( array, len, element_size )
+ *
+ * This prints out the array that is defined by __array in a nice format.
+ */
+ __print_array(__get_dynamic_array(list),
+ __get_dynamic_array_len(list) / sizeof(int),
+ sizeof(int)),
+ __get_str(str), __get_bitmask(cpus), __get_str(vstr))
+);
+
+/*
+ * There may be a case where a tracepoint should only be called if
+ * some condition is set. Otherwise the tracepoint should not be called.
+ * But doing something like:
+ *
+ * if (cond)
+ * trace_foo();
+ *
+ * would cause a little overhead when tracing is not enabled, and that
+ * overhead, even if small, is not something we want. As tracepoints
+ * use static branches (aka jump labels), where no branch is taken to
+ * skip the tracepoint when not enabled, and a jmp is placed to jump
+ * to the tracepoint code when it is enabled, having an if statement
+ * nullifies that optimization. It would be nice to place that
+ * condition within the static branch. This is where TRACE_EVENT_CONDITION
+ * comes in.
+ *
+ * TRACE_EVENT_CONDITION() is just like TRACE_EVENT, except it adds another
+ * parameter just after args. Where TRACE_EVENT has:
+ *
+ * TRACE_EVENT(name, proto, args, struct, assign, printk)
+ *
+ * the CONDITION version has:
+ *
+ * TRACE_EVENT_CONDITION(name, proto, args, cond, struct, assign, printk)
+ *
+ * Everything is the same as TRACE_EVENT except for the new cond. Think
+ * of the cond variable as:
+ *
+ * if (cond)
+ * trace_foo_bar_with_cond();
+ *
+ * Except that the logic for the if branch is placed after the static branch.
+ * That is, the if statement that processes the condition will not be
+ * executed unless that tracepoint is enabled. Otherwise it still remains
+ * a nop.
+ */
+TRACE_EVENT_CONDITION(foo_bar_with_cond,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_CONDITION(!(bar % 10)),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
+ ),
+
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar)
+);
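+
+/*
+ * An illustrative caller: with the TP_CONDITION() above, only the
+ * calls where bar is a multiple of 10 are recorded, and the condition
+ * itself is only evaluated while the event is enabled:
+ *
+ *	for (i = 0; i < 100; i++)
+ *		trace_foo_bar_with_cond("looping", i);
+ */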
+
+int foo_bar_reg(void);
+void foo_bar_unreg(void);
+
+/*
+ * Now in the case that some function needs to be called when the
+ * tracepoint is enabled and/or when it is disabled, the
+ * TRACE_EVENT_FN() serves this purpose. This is just like TRACE_EVENT()
+ * but adds two more parameters at the end:
+ *
+ * TRACE_EVENT_FN( name, proto, args, struct, assign, printk, reg, unreg)
+ *
+ * reg and unreg are functions with the prototypes of:
+ *
+ * int reg(void)
+ * void unreg(void)
+ *
+ * The reg function gets called before the tracepoint is enabled, and
+ * the unreg function gets called after the tracepoint is disabled.
+ *
+ * Note, reg and unreg are allowed to be NULL. If you only need to
+ * call a function before enabling, or after disabling, just set one
+ * function and pass in NULL for the other parameter.
+ */
+TRACE_EVENT_FN(foo_bar_with_fn,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
+ ),
+
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar),
+
+ foo_bar_reg, foo_bar_unreg
+);
+
+/*
+ * Each TRACE_EVENT macro creates several helper functions to produce
+ * the code to add the tracepoint, create the files in the trace
+ * directory, hook it to perf, assign the values and to print out
+ * the raw data from the ring buffer. To prevent too much bloat,
+ * if there is more than one tracepoint that uses the same format
+ * for the proto, args, struct, assign and printk, and only the name
+ * is different, it is highly recommended to use DECLARE_EVENT_CLASS().
+ *
+ * The DECLARE_EVENT_CLASS() macro creates most of the functions for the
+ * tracepoint. Then DEFINE_EVENT() is used to hook a tracepoint to those
+ * functions. This DEFINE_EVENT() is an instance of the class and can
+ * be enabled and disabled separately from other events (either TRACE_EVENT
+ * or other DEFINE_EVENT()s).
+ *
+ * Note, TRACE_EVENT() itself is simply defined as:
+ *
+ * #define TRACE_EVENT(name, proto, args, tstruct, assign, printk) \
+ * DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
+ * DEFINE_EVENT(name, name, proto, args)
+ *
+ * The DEFINE_EVENT() also can be declared with conditions and reg functions:
+ *
+ * DEFINE_EVENT_CONDITION(template, name, proto, args, cond);
+ * DEFINE_EVENT_FN(template, name, proto, args, reg, unreg);
+ */
+DECLARE_EVENT_CLASS(foo_template,
+
+ TP_PROTO(const char *foo, int bar),
+
+ TP_ARGS(foo, bar),
+
+ TP_STRUCT__entry(
+ __string( foo, foo )
+ __field( int, bar )
+ ),
+
+ TP_fast_assign(
+ __assign_str(foo, foo);
+ __entry->bar = bar;
+ ),
+
+ TP_printk("foo %s %d", __get_str(foo), __entry->bar)
+);
+
+/*
+ * Here's a better way to do the previous samples (except the first
+ * example, which had more fields and could not use this class).
+ */
+DEFINE_EVENT(foo_template, foo_with_template_simple,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar));
+
+DEFINE_EVENT_CONDITION(foo_template, foo_with_template_cond,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ TP_CONDITION(!(bar % 8)));
+
+
+DEFINE_EVENT_FN(foo_template, foo_with_template_fn,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ foo_bar_reg, foo_bar_unreg);
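+
+/*
+ * Each DEFINE_EVENT*() above creates its own trace_<name>() function
+ * to call (the arguments here are only illustrative):
+ *
+ *	trace_foo_with_template_simple("Look!", cnt);
+ *	trace_foo_with_template_cond("checking", cnt);
+ *	trace_foo_with_template_fn("Look at me too", cnt);
+ */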
+
+/*
+ * Any time two events share basically the same values and have
+ * the same output, use DECLARE_EVENT_CLASS() and DEFINE_EVENT()
+ * whenever possible.
+ */
+
+/*
+ * If the event is similar to the DECLARE_EVENT_CLASS, but you need
+ * to have a different output, then use DEFINE_EVENT_PRINT() which
+ * lets you override the TP_printk() of the class.
+ */
+
+DEFINE_EVENT_PRINT(foo_template, foo_with_template_print,
+ TP_PROTO(const char *foo, int bar),
+ TP_ARGS(foo, bar),
+ TP_printk("bar %s %d", __get_str(foo), __entry->bar));
+
+/*
+ * There is yet another dynamic data attribute: __rel_loc. If you
+ * use the __rel_dynamic_array(), __rel_string() etc. macros, you
+ * can use this attribute. There is no functional difference
+ * with/without 'rel', but the encoding is a bit different. This is
+ * expected to be used with user-space events; there is no reason
+ * for kernel events to use it, other than for testing.
+ */
+
+TRACE_EVENT(foo_rel_loc,
+
+ TP_PROTO(const char *foo, int bar, unsigned long *mask),
+
+ TP_ARGS(foo, bar, mask),
+
+ TP_STRUCT__entry(
+ __rel_string( foo, foo )
+ __field( int, bar )
+ __rel_bitmask( bitmask,
+ BITS_PER_BYTE * sizeof(unsigned long) )
+ ),
+
+ TP_fast_assign(
+ __assign_rel_str(foo, foo);
+ __entry->bar = bar;
+ __assign_rel_bitmask(bitmask, mask,
+ BITS_PER_BYTE * sizeof(unsigned long));
+ ),
+
+ TP_printk("foo_rel_loc %s, %d, %s", __get_rel_str(foo), __entry->bar,
+ __get_rel_bitmask(bitmask))
+);
+#endif
+
+/***** NOTICE! The #if protection ends here. *****/
+
+
+/*
+ * There are several ways I could have done this. If I left out the
+ * TRACE_INCLUDE_PATH, then it would default to the kernel source
+ * include/trace/events directory.
+ *
+ * I could specify a path from the define_trace.h file back to this
+ * file.
+ *
+ * #define TRACE_INCLUDE_PATH ../../samples/trace_events
+ *
+ * But the safest and easiest way to make it use the directory
+ * that the file is in is to add the following to the Makefile:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(src)
+ *
+ * This will make sure the current path is part of the include
+ * structure for our file so that define_trace.h can find it.
+ *
+ * I could have made only the top level directory the include:
+ *
+ * CFLAGS_trace-events-sample.o := -I$(PWD)
+ *
+ * And then let the path to this directory be the TRACE_INCLUDE_PATH:
+ *
+ * #define TRACE_INCLUDE_PATH samples/trace_events
+ *
+ * But then if something defines "samples" or "trace_events" as a macro
+ * then we could risk that being converted too, giving us an unexpected
+ * result.
+ */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#define TRACE_INCLUDE_FILE trace-events-sample
+#include <trace/define_trace.h>
diff --git a/samples/trace_events/trace_custom_sched.c b/samples/trace_events/trace_custom_sched.c
new file mode 100644
index 000000000..b99d9ab7d
--- /dev/null
+++ b/samples/trace_events/trace_custom_sched.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * event tracer
+ *
+ * Copyright (C) 2022 Google Inc, Steven Rostedt <rostedt@goodmis.org>
+ */
+
+#define pr_fmt(fmt) fmt
+
+#include <linux/trace_events.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+/*
+ * Must include the event header that the custom event will attach to,
+ * from the C file, and not in the custom header file.
+ */
+#include <trace/events/sched.h>
+
+/* Declare CREATE_CUSTOM_TRACE_EVENTS before including custom header */
+#define CREATE_CUSTOM_TRACE_EVENTS
+
+#include "trace_custom_sched.h"
+
+/*
+ * As the trace events are not exported to modules, the use of
+ * for_each_kernel_tracepoint() is needed to find the trace event
+ * to attach to. The fct() function below is a callback that
+ * will be called for every event.
+ *
+ * Helper functions created by the TRACE_CUSTOM_EVENT() macro are
+ * used to update the event. Those are of the form:
+ *
+ * trace_custom_event_<event>_update()
+ *
+ * Where <event> is the event to attach.
+ */
+static void fct(struct tracepoint *tp, void *priv)
+{
+ trace_custom_event_sched_switch_update(tp);
+ trace_custom_event_sched_waking_update(tp);
+}
+
+static int __init trace_sched_init(void)
+{
+ for_each_kernel_tracepoint(fct, NULL);
+ return 0;
+}
+
+static void __exit trace_sched_exit(void)
+{
+}
+
+module_init(trace_sched_init);
+module_exit(trace_sched_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Custom scheduling events");
+MODULE_LICENSE("GPL");
diff --git a/samples/trace_events/trace_custom_sched.h b/samples/trace_events/trace_custom_sched.h
new file mode 100644
index 000000000..951388334
--- /dev/null
+++ b/samples/trace_events/trace_custom_sched.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Like the headers that use TRACE_EVENT(), the TRACE_CUSTOM_EVENT()
+ * needs a header that allows for multiple inclusions.
+ *
+ * Test for a unique name (here we have _TRACE_CUSTOM_SCHED_H),
+ * while also allowing it to be reread if TRACE_CUSTOM_MULTI_READ is defined.
+ */
+#if !defined(_TRACE_CUSTOM_SCHED_H) || defined(TRACE_CUSTOM_MULTI_READ)
+#define _TRACE_CUSTOM_SCHED_H
+
+/* Include linux/trace_events.h for initial defines of TRACE_CUSTOM_EVENT() */
+#include <linux/trace_events.h>
+
+/*
+ * TRACE_CUSTOM_EVENT() is just like TRACE_EVENT(). The first parameter
+ * is the name of an existing event whose TRACE_EVENT header has been
+ * included in the C file before this file is included.
+ */
+TRACE_CUSTOM_EVENT(sched_switch,
+
+ /*
+ * The TP_PROTO() and TP_ARGS must match the trace event
+ * that the custom event is using.
+ */
+ TP_PROTO(bool preempt,
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned int prev_state),
+
+ TP_ARGS(preempt, prev, next, prev_state),
+
+ /*
+ * The next fields are where the customization happens.
+ * The TP_STRUCT__entry() defines what will be recorded
+ * in the ring buffer when the custom event triggers.
+ *
+ * The rest is just like the TRACE_EVENT() macro except that
+ * it uses the custom entry.
+ */
+ TP_STRUCT__entry(
+ __field( unsigned short, prev_prio )
+ __field( unsigned short, next_prio )
+ __field( pid_t, next_pid )
+ ),
+
+ TP_fast_assign(
+ __entry->prev_prio = prev->prio;
+ __entry->next_pid = next->pid;
+ __entry->next_prio = next->prio;
+ ),
+
+ TP_printk("prev_prio=%d next_pid=%d next_prio=%d",
+ __entry->prev_prio, __entry->next_pid, __entry->next_prio)
+)
+
+
+TRACE_CUSTOM_EVENT(sched_waking,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid )
+ __field( unsigned short, prio )
+ ),
+
+ TP_fast_assign(
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("pid=%d prio=%d", __entry->pid, __entry->prio)
+)
+#endif
+/*
+ * Just like the headers that create TRACE_EVENTs, the below must
+ * be outside the protection of the above #if block.
+ */
+
+/*
+ * It is required that the Makefile includes:
+ * CFLAGS_<c_file>.o := -I$(src)
+ */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/*
+ * It is required that TRACE_INCLUDE_FILE be the same
+ * as this file's name without the ".h".
+ */
+#define TRACE_INCLUDE_FILE trace_custom_sched
+#include <trace/define_custom_trace.h>
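+
+/*
+ * Illustrative usage, assuming tracefs is mounted at
+ * /sys/kernel/tracing: once the module is loaded, the custom events
+ * show up under the "custom" system and can be enabled with:
+ *
+ *   echo 1 > /sys/kernel/tracing/events/custom/sched_switch/enable
+ *   cat /sys/kernel/tracing/trace
+ */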
diff --git a/samples/trace_printk/Makefile b/samples/trace_printk/Makefile
new file mode 100644
index 000000000..c0df36167
--- /dev/null
+++ b/samples/trace_printk/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# builds a module that calls various trace_printk routines
+# To use it (as root): insmod <module_name.ko>
+
+# This module can also be used to test the trace_printk code.
+
+obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace-printk.o
diff --git a/samples/trace_printk/trace-printk.c b/samples/trace_printk/trace-printk.c
new file mode 100644
index 000000000..cfc159580
--- /dev/null
+++ b/samples/trace_printk/trace-printk.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/irq_work.h>
+
+/* Must not be static to force gcc to consider these non-constant */
+char *trace_printk_test_global_str =
+ "This is a dynamic string that will use trace_puts\n";
+
+char *trace_printk_test_global_str_irq =
+ "(irq) This is a dynamic string that will use trace_puts\n";
+
+char *trace_printk_test_global_str_fmt =
+ "%sThis is a %s that will use trace_printk\n";
+
+static struct irq_work irqwork;
+
+static void trace_printk_irq_work(struct irq_work *work)
+{
+ trace_printk("(irq) This is a static string that will use trace_bputs\n");
+ trace_printk(trace_printk_test_global_str_irq);
+
+ trace_printk("(irq) This is a %s that will use trace_bprintk()\n",
+ "static string");
+
+ trace_printk(trace_printk_test_global_str_fmt,
+ "(irq) ", "dynamic string");
+}
+
+static int __init trace_printk_init(void)
+{
+ init_irq_work(&irqwork, trace_printk_irq_work);
+
+ trace_printk("This is a static string that will use trace_bputs\n");
+ trace_printk(trace_printk_test_global_str);
+
+ /* Kick off printing in irq context */
+ irq_work_queue(&irqwork);
+ irq_work_sync(&irqwork);
+
+ trace_printk("This is a %s that will use trace_bprintk()\n",
+ "static string");
+
+ trace_printk(trace_printk_test_global_str_fmt, "", "dynamic string");
+
+ return 0;
+}
+
+static void __exit trace_printk_exit(void)
+{
+}
+
+module_init(trace_printk_init);
+module_exit(trace_printk_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("trace-printk");
+MODULE_LICENSE("GPL");
diff --git a/samples/uhid/.gitignore b/samples/uhid/.gitignore
new file mode 100644
index 000000000..0e0a5a929
--- /dev/null
+++ b/samples/uhid/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/uhid-example
diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile
new file mode 100644
index 000000000..0aa424ec4
--- /dev/null
+++ b/samples/uhid/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs-always-y += uhid-example
+
+userccflags += -I usr/include
diff --git a/samples/uhid/uhid-example.c b/samples/uhid/uhid-example.c
new file mode 100644
index 000000000..015cb06a2
--- /dev/null
+++ b/samples/uhid/uhid-example.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UHID Example
+ *
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using uhid.
+ */
+
+/*
+ * UHID Example
+ * This example emulates a basic 3-button mouse with wheel over UHID. Run this
+ * program as root and then use the following keys to control the mouse:
+ * q: Quit the application
+ * 1: Toggle left button (down, up, ...)
+ * 2: Toggle right button
+ * 3: Toggle middle button
+ * a: Move mouse left
+ * d: Move mouse right
+ * w: Move mouse up
+ * s: Move mouse down
+ * r: Move wheel up
+ * f: Move wheel down
+ *
+ * In addition to the 3-button mouse, 3 keyboard LEDs are also supported (LED_NUML,
+ * LED_CAPSL and LED_SCROLLL). The device doesn't generate any related keyboard
+ * events, though. You need to manually write the EV_LED/LED_XY/1 activation
+ * input event to the evdev device to see it being sent to this device.
+ *
+ * If uhid is not available as /dev/uhid, then you can pass a different path as
+ * first argument.
+ * If <linux/uhid.h> is not installed in /usr, then compile this with:
+ * gcc -o ./uhid_test -Wall -I./include ./samples/uhid/uhid-example.c
+ * And ignore the warning about kernel headers. However, it is recommended to
+ * use the installed uhid.h if available.
+ */
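+
+/*
+ * A minimal, hypothetical snippet to exercise the LED output path from
+ * another program, assuming the created device showed up as
+ * /dev/input/eventX and evdev_fd is an open fd on that node
+ * (struct input_event comes from <linux/input.h>):
+ *
+ *	struct input_event ev = {
+ *		.type = EV_LED, .code = LED_CAPSL, .value = 1,
+ *	};
+ *	write(evdev_fd, &ev, sizeof(ev));
+ *
+ * This program should then print an "LED output report" line.
+ */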
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <linux/uhid.h>
+
+/*
+ * HID Report Descriptor
+ * We emulate a basic 3-button mouse with wheel and 3 keyboard LEDs. This is
+ * the report-descriptor as the kernel will parse it:
+ *
+ * INPUT(1)[INPUT]
+ * Field(0)
+ * Physical(GenericDesktop.Pointer)
+ * Application(GenericDesktop.Mouse)
+ * Usage(3)
+ * Button.0001
+ * Button.0002
+ * Button.0003
+ * Logical Minimum(0)
+ * Logical Maximum(1)
+ * Report Size(1)
+ * Report Count(3)
+ * Report Offset(0)
+ * Flags( Variable Absolute )
+ * Field(1)
+ * Physical(GenericDesktop.Pointer)
+ * Application(GenericDesktop.Mouse)
+ * Usage(3)
+ * GenericDesktop.X
+ * GenericDesktop.Y
+ * GenericDesktop.Wheel
+ * Logical Minimum(-128)
+ * Logical Maximum(127)
+ * Report Size(8)
+ * Report Count(3)
+ * Report Offset(8)
+ * Flags( Variable Relative )
+ * OUTPUT(2)[OUTPUT]
+ * Field(0)
+ * Application(GenericDesktop.Keyboard)
+ * Usage(3)
+ * LED.NumLock
+ * LED.CapsLock
+ * LED.ScrollLock
+ * Logical Minimum(0)
+ * Logical Maximum(1)
+ * Report Size(1)
+ * Report Count(3)
+ * Report Offset(0)
+ * Flags( Variable Absolute )
+ *
+ * This is the mapping that we expect:
+ * Button.0001 ---> Key.LeftBtn
+ * Button.0002 ---> Key.RightBtn
+ * Button.0003 ---> Key.MiddleBtn
+ * GenericDesktop.X ---> Relative.X
+ * GenericDesktop.Y ---> Relative.Y
+ * GenericDesktop.Wheel ---> Relative.Wheel
+ * LED.NumLock ---> LED.NumLock
+ * LED.CapsLock ---> LED.CapsLock
+ * LED.ScrollLock ---> LED.ScrollLock
+ *
+ * This information can be verified by reading /sys/kernel/debug/hid/<dev>/rdesc
+ * This file should print the same information as shown above.
+ */
+
+static unsigned char rdesc[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x09, 0x01, /* USAGE (Pointer) */
+ 0xa1, 0x00, /* COLLECTION (Physical) */
+ 0x85, 0x01, /* REPORT_ID (1) */
+ 0x05, 0x09, /* USAGE_PAGE (Button) */
+ 0x19, 0x01, /* USAGE_MINIMUM (Button 1) */
+ 0x29, 0x03, /* USAGE_MAXIMUM (Button 3) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x05, /* REPORT_SIZE (5) */
+ 0x81, 0x01, /* INPUT (Cnst,Var,Abs) */
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x09, 0x38, /* USAGE (WHEEL) */
+ 0x15, 0x81, /* LOGICAL_MINIMUM (-127) */
+ 0x25, 0x7f, /* LOGICAL_MAXIMUM (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x81, 0x06, /* INPUT (Data,Var,Rel) */
+ 0xc0, /* END_COLLECTION */
+ 0xc0, /* END_COLLECTION */
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID (2) */
+ 0x05, 0x08, /* USAGE_PAGE (Led) */
+ 0x19, 0x01, /* USAGE_MINIMUM (1) */
+ 0x29, 0x03, /* USAGE_MAXIMUM (3) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x91, 0x02, /* Output (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x05, /* REPORT_SIZE (5) */
+ 0x91, 0x01, /* Output (Cnst,Var,Abs) */
+ 0xc0, /* END_COLLECTION */
+};
+
+static int uhid_write(int fd, const struct uhid_event *ev)
+{
+ ssize_t ret;
+
+ ret = write(fd, ev, sizeof(*ev));
+ if (ret < 0) {
+ fprintf(stderr, "Cannot write to uhid: %m\n");
+ return -errno;
+ } else if (ret != sizeof(*ev)) {
+ fprintf(stderr, "Wrong size written to uhid: %zd != %zu\n",
+			ret, sizeof(*ev));
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+static int create(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_CREATE;
+ strcpy((char*)ev.u.create.name, "test-uhid-device");
+ ev.u.create.rd_data = rdesc;
+ ev.u.create.rd_size = sizeof(rdesc);
+ ev.u.create.bus = BUS_USB;
+ ev.u.create.vendor = 0x15d9;
+ ev.u.create.product = 0x0a37;
+ ev.u.create.version = 0;
+ ev.u.create.country = 0;
+
+ return uhid_write(fd, &ev);
+}
+
+static void destroy(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_DESTROY;
+
+ uhid_write(fd, &ev);
+}
+
+/* This parses raw output reports sent by the kernel to the device. A normal
+ * uhid program shouldn't do this but instead just forward the raw report.
+ * However, for documentation purposes, we try to detect LED events here and
+ * print debug messages for it. */
+static void handle_output(struct uhid_event *ev)
+{
+	/* LED messages are advertised via OUTPUT reports; ignore the rest */
+ if (ev->u.output.rtype != UHID_OUTPUT_REPORT)
+ return;
+	/* LED reports have a length of 2 bytes */
+ if (ev->u.output.size != 2)
+ return;
+ /* first byte is report-id which is 0x02 for LEDs in our rdesc */
+ if (ev->u.output.data[0] != 0x2)
+ return;
+
+ /* print flags payload */
+ fprintf(stderr, "LED output report received with flags %x\n",
+ ev->u.output.data[1]);
+}
+
+static int event(int fd)
+{
+ struct uhid_event ev;
+ ssize_t ret;
+
+ memset(&ev, 0, sizeof(ev));
+ ret = read(fd, &ev, sizeof(ev));
+ if (ret == 0) {
+ fprintf(stderr, "Read HUP on uhid-cdev\n");
+ return -EFAULT;
+ } else if (ret < 0) {
+ fprintf(stderr, "Cannot read uhid-cdev: %m\n");
+ return -errno;
+ } else if (ret != sizeof(ev)) {
+ fprintf(stderr, "Invalid size read from uhid-dev: %zd != %zu\n",
+ ret, sizeof(ev));
+ return -EFAULT;
+ }
+
+ switch (ev.type) {
+ case UHID_START:
+ fprintf(stderr, "UHID_START from uhid-dev\n");
+ break;
+ case UHID_STOP:
+ fprintf(stderr, "UHID_STOP from uhid-dev\n");
+ break;
+ case UHID_OPEN:
+ fprintf(stderr, "UHID_OPEN from uhid-dev\n");
+ break;
+ case UHID_CLOSE:
+ fprintf(stderr, "UHID_CLOSE from uhid-dev\n");
+ break;
+ case UHID_OUTPUT:
+ fprintf(stderr, "UHID_OUTPUT from uhid-dev\n");
+ handle_output(&ev);
+ break;
+ case UHID_OUTPUT_EV:
+ fprintf(stderr, "UHID_OUTPUT_EV from uhid-dev\n");
+ break;
+ default:
+ fprintf(stderr, "Invalid event from uhid-dev: %u\n", ev.type);
+ }
+
+ return 0;
+}
+
+static bool btn1_down;
+static bool btn2_down;
+static bool btn3_down;
+static signed char abs_hor;
+static signed char abs_ver;
+static signed char wheel;
+
+static int send_event(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_INPUT;
+ ev.u.input.size = 5;
+
+ ev.u.input.data[0] = 0x1;
+ if (btn1_down)
+ ev.u.input.data[1] |= 0x1;
+ if (btn2_down)
+ ev.u.input.data[1] |= 0x2;
+ if (btn3_down)
+ ev.u.input.data[1] |= 0x4;
+
+ ev.u.input.data[2] = abs_hor;
+ ev.u.input.data[3] = abs_ver;
+ ev.u.input.data[4] = wheel;
+
+ return uhid_write(fd, &ev);
+}
+
+static int keyboard(int fd)
+{
+ char buf[128];
+ ssize_t ret, i;
+
+ ret = read(STDIN_FILENO, buf, sizeof(buf));
+ if (ret == 0) {
+ fprintf(stderr, "Read HUP on stdin\n");
+ return -EFAULT;
+ } else if (ret < 0) {
+ fprintf(stderr, "Cannot read stdin: %m\n");
+ return -errno;
+ }
+
+ for (i = 0; i < ret; ++i) {
+ switch (buf[i]) {
+ case '1':
+ btn1_down = !btn1_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case '2':
+ btn2_down = !btn2_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case '3':
+ btn3_down = !btn3_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case 'a':
+ abs_hor = -20;
+ ret = send_event(fd);
+ abs_hor = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'd':
+ abs_hor = 20;
+ ret = send_event(fd);
+ abs_hor = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'w':
+ abs_ver = -20;
+ ret = send_event(fd);
+ abs_ver = 0;
+ if (ret)
+ return ret;
+ break;
+ case 's':
+ abs_ver = 20;
+ ret = send_event(fd);
+ abs_ver = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'r':
+ wheel = 1;
+ ret = send_event(fd);
+ wheel = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'f':
+ wheel = -1;
+ ret = send_event(fd);
+ wheel = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'q':
+ return -ECANCELED;
+ default:
+ fprintf(stderr, "Invalid input: %c\n", buf[i]);
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ const char *path = "/dev/uhid";
+ struct pollfd pfds[2];
+ int ret;
+ struct termios state;
+
+ ret = tcgetattr(STDIN_FILENO, &state);
+ if (ret) {
+ fprintf(stderr, "Cannot get tty state\n");
+ } else {
+ state.c_lflag &= ~ICANON;
+ state.c_cc[VMIN] = 1;
+ ret = tcsetattr(STDIN_FILENO, TCSANOW, &state);
+ if (ret)
+ fprintf(stderr, "Cannot set tty state\n");
+ }
+
+ if (argc >= 2) {
+ if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ fprintf(stderr, "Usage: %s [%s]\n", argv[0], path);
+ return EXIT_SUCCESS;
+ } else {
+ path = argv[1];
+ }
+ }
+
+ fprintf(stderr, "Open uhid-cdev %s\n", path);
+ fd = open(path, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ fprintf(stderr, "Cannot open uhid-cdev %s: %m\n", path);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(stderr, "Create uhid device\n");
+ ret = create(fd);
+ if (ret) {
+ close(fd);
+ return EXIT_FAILURE;
+ }
+
+ pfds[0].fd = STDIN_FILENO;
+ pfds[0].events = POLLIN;
+ pfds[1].fd = fd;
+ pfds[1].events = POLLIN;
+
+ fprintf(stderr, "Press 'q' to quit...\n");
+ while (1) {
+ ret = poll(pfds, 2, -1);
+ if (ret < 0) {
+ fprintf(stderr, "Cannot poll for fds: %m\n");
+ break;
+ }
+ if (pfds[0].revents & POLLHUP) {
+ fprintf(stderr, "Received HUP on stdin\n");
+ break;
+ }
+ if (pfds[1].revents & POLLHUP) {
+ fprintf(stderr, "Received HUP on uhid-cdev\n");
+ break;
+ }
+
+ if (pfds[0].revents & POLLIN) {
+ ret = keyboard(fd);
+ if (ret)
+ break;
+ }
+ if (pfds[1].revents & POLLIN) {
+ ret = event(fd);
+ if (ret)
+ break;
+ }
+ }
+
+ fprintf(stderr, "Destroy uhid device\n");
+ destroy(fd);
+ return EXIT_SUCCESS;
+}
diff --git a/samples/user_events/Makefile b/samples/user_events/Makefile
new file mode 100644
index 000000000..7252b589d
--- /dev/null
+++ b/samples/user_events/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -Wl,-no-as-needed -Wall -I../../usr/include
+
+example: example.o
+example.o: example.c
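+
+# Illustrative build and run, assuming user_events support is enabled
+# in the running kernel:
+#   make example
+#   sudo ./example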
diff --git a/samples/user_events/example.c b/samples/user_events/example.c
new file mode 100644
index 000000000..d06dc2415
--- /dev/null
+++ b/samples/user_events/example.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave <beaub@linux.microsoft.com>
+ */
+
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <asm/bitsperlong.h>
+#include <endian.h>
+#include <linux/user_events.h>
+
+#if __BITS_PER_LONG == 64
+#define endian_swap(x) htole64(x)
+#else
+#define endian_swap(x) htole32(x)
+#endif
+
+/* Assumes debugfs is mounted */
+const char *data_file = "/sys/kernel/debug/tracing/user_events_data";
+const char *status_file = "/sys/kernel/debug/tracing/user_events_status";
+
+static int event_status(long **status)
+{
+	int fd = open(status_file, O_RDONLY);
+
+	if (fd == -1)
+		return -1;
+
+	*status = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
+		       MAP_SHARED, fd, 0);
+
+ close(fd);
+
+ if (*status == MAP_FAILED)
+ return -1;
+
+ return 0;
+}
+
+static int event_reg(int fd, const char *command, long *index, long *mask,
+ int *write)
+{
+ struct user_reg reg = {0};
+
+ reg.size = sizeof(reg);
+ reg.name_args = (__u64)command;
+
+ if (ioctl(fd, DIAG_IOCSREG, &reg) == -1)
+ return -1;
+
+ *index = reg.status_bit / __BITS_PER_LONG;
+ *mask = endian_swap(1L << (reg.status_bit % __BITS_PER_LONG));
+ *write = reg.write_index;
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int data_fd, write;
+ long index, mask;
+ long *status_page;
+ struct iovec io[2];
+ __u32 count = 0;
+
+ if (event_status(&status_page) == -1)
+ return errno;
+
+	data_fd = open(data_file, O_RDWR);
+
+	if (data_fd == -1)
+		return errno;
+
+ if (event_reg(data_fd, "test u32 count", &index, &mask, &write) == -1)
+ return errno;
+
+ /* Setup iovec */
+ io[0].iov_base = &write;
+ io[0].iov_len = sizeof(write);
+ io[1].iov_base = &count;
+ io[1].iov_len = sizeof(count);
+
+ask:
+ printf("Press enter to check status...\n");
+ getchar();
+
+ /* Check if anyone is listening */
+ if (status_page[index] & mask) {
+ /* Yep, trace out our data */
+ writev(data_fd, (const struct iovec *)io, 2);
+
+ /* Increase the count */
+ count++;
+
+ printf("Something was attached, wrote data\n");
+ }
+
+ goto ask;
+
+ return 0;
+}
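+
+/*
+ * An illustrative way to attach a listener (using the same debugfs
+ * paths as above), which sets the status bit so the writev() data
+ * flows into the trace buffer:
+ *
+ *   echo 1 > /sys/kernel/debug/tracing/events/user_events/test/enable
+ *   cat /sys/kernel/debug/tracing/trace
+ */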
diff --git a/samples/v4l/Makefile b/samples/v4l/Makefile
new file mode 100644
index 000000000..f86ab1245
--- /dev/null
+++ b/samples/v4l/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_PCI_SKELETON) := v4l2-pci-skeleton.o
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
new file mode 100644
index 000000000..a61f94db1
--- /dev/null
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -0,0 +1,903 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is a V4L2 PCI Skeleton Driver. It gives an initial skeleton source
+ * for use with other PCI drivers.
+ *
+ * This skeleton PCI driver assumes that the card has an S-Video connector as
+ * input 0 and an HDMI connector as input 1.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+MODULE_DESCRIPTION("V4L2 PCI Skeleton Driver");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * struct skeleton - All internal data for one instance of device
+ * @pdev: PCI device
+ * @v4l2_dev: top-level v4l2 device struct
+ * @vdev: video node structure
+ * @ctrl_handler: control handler structure
+ * @lock: ioctl serialization mutex
+ * @std: current SDTV standard
+ * @timings: current HDTV timings
+ * @format: current pix format
+ * @input: current video input (0 = SDTV, 1 = HDTV)
+ * @queue: vb2 video capture queue
+ * @qlock: spinlock controlling access to buf_list and sequence
+ * @buf_list: list of buffers queued for DMA
+ * @field: the field (TOP/BOTTOM/other) of the current buffer
+ * @sequence: frame sequence counter
+ */
+struct skeleton {
+ struct pci_dev *pdev;
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mutex lock;
+ v4l2_std_id std;
+ struct v4l2_dv_timings timings;
+ struct v4l2_pix_format format;
+ unsigned input;
+
+ struct vb2_queue queue;
+
+ spinlock_t qlock;
+ struct list_head buf_list;
+ unsigned field;
+ unsigned sequence;
+};
+
+struct skel_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct skel_buffer *to_skel_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct skel_buffer, vb);
+}
+
+static const struct pci_device_id skeleton_pci_tbl[] = {
+ /* { PCI_DEVICE(PCI_VENDOR_ID_, PCI_DEVICE_ID_) }, */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, skeleton_pci_tbl);
+
+/*
+ * HDTV: this structure has the capabilities of the HDTV receiver.
+ * It is used to constrain the huge list of possible formats based
+ * upon the hardware capabilities.
+ */
+static const struct v4l2_dv_timings_cap skel_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(
+ 720, 1920, /* min/max width */
+ 480, 1080, /* min/max height */
+		27000000, 74250000, /* min/max pixelclock */
+ V4L2_DV_BT_STD_CEA861, /* Supported standards */
+ /* capabilities */
+ V4L2_DV_BT_CAP_INTERLACED | V4L2_DV_BT_CAP_PROGRESSIVE
+ )
+};
+
+/*
+ * Supported SDTV standards. This does the same job as skel_timings_cap, but
+ * for standard TV formats.
+ */
+#define SKEL_TVNORMS V4L2_STD_ALL
+
+/*
+ * Interrupt handler: typically interrupts happen after a new frame has been
+ * captured. It is the job of the handler to remove the new frame from the
+ * internal list and give it back to the vb2 framework, updating the sequence
+ * counter, field and timestamp at the same time.
+ */
+static irqreturn_t skeleton_irq(int irq, void *dev_id)
+{
+#ifdef TODO
+ struct skeleton *skel = dev_id;
+
+ /* handle interrupt */
+
+ /* Once a new frame has been captured, mark it as done like this: */
+ if (captured_new_frame) {
+ ...
+ spin_lock(&skel->qlock);
+ list_del(&new_buf->list);
+ spin_unlock(&skel->qlock);
+ new_buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ new_buf->vb.sequence = skel->sequence++;
+ new_buf->vb.field = skel->field;
+ if (skel->format.field == V4L2_FIELD_ALTERNATE) {
+ if (skel->field == V4L2_FIELD_BOTTOM)
+ skel->field = V4L2_FIELD_TOP;
+ else if (skel->field == V4L2_FIELD_TOP)
+ skel->field = V4L2_FIELD_BOTTOM;
+ }
+ vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+#endif
+ return IRQ_HANDLED;
+}
+
+/*
+ * Setup the constraints of the queue: besides setting the number of planes
+ * per buffer and the size and allocation context of each plane, it also
+ * checks if sufficient buffers have been allocated. Usually 3 is a good
+ * minimum number: many DMA engines need a minimum of 2 buffers in the
+ * queue and you need to have another available for userspace processing.
+ */
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct skeleton *skel = vb2_get_drv_priv(vq);
+
+ skel->field = skel->format.field;
+ if (skel->field == V4L2_FIELD_ALTERNATE) {
+ /*
+ * You cannot use read() with FIELD_ALTERNATE since the field
+ * information (TOP/BOTTOM) cannot be passed back to the user.
+ */
+ if (vb2_fileio_is_active(vq))
+ return -EINVAL;
+ skel->field = V4L2_FIELD_TOP;
+ }
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes)
+ return sizes[0] < skel->format.sizeimage ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = skel->format.sizeimage;
+ return 0;
+}
+
+/*
+ * Prepare the buffer for queueing to the DMA engine: check and set the
+ * payload size.
+ */
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct skeleton *skel = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = skel->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(&skel->pdev->dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+/*
+ * Queue this buffer to the DMA engine.
+ */
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct skeleton *skel = vb2_get_drv_priv(vb->vb2_queue);
+ struct skel_buffer *buf = to_skel_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&skel->qlock, flags);
+ list_add_tail(&buf->list, &skel->buf_list);
+
+ /* TODO: Update any DMA pointers if necessary */
+
+ spin_unlock_irqrestore(&skel->qlock, flags);
+}
+
+static void return_all_buffers(struct skeleton *skel,
+ enum vb2_buffer_state state)
+{
+ struct skel_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skel->qlock, flags);
+ list_for_each_entry_safe(buf, node, &skel->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&skel->qlock, flags);
+}
+
+/*
+ * Start streaming. First check if the minimum number of buffers has been
+ * queued. If not, then return -ENOBUFS and the vb2 framework will call
+ * this function again the next time a buffer has been queued until enough
+ * buffers are available to actually start the DMA engine.
+ */
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct skeleton *skel = vb2_get_drv_priv(vq);
+ int ret = 0;
+
+ skel->sequence = 0;
+
+ /* TODO: start DMA */
+
+ if (ret) {
+ /*
+ * In case of an error, return all active buffers to the
+ * QUEUED state
+ */
+ return_all_buffers(skel, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
+}
+
+/*
+ * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued
+ * and passed on to the vb2 framework marked as STATE_ERROR.
+ */
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct skeleton *skel = vb2_get_drv_priv(vq);
+
+ /* TODO: stop DMA */
+
+ /* Release all active buffers */
+ return_all_buffers(skel, VB2_BUF_STATE_ERROR);
+}
+
+/*
+ * The vb2 queue ops. Note that since q->lock is set we can use the standard
+ * vb2_ops_wait_prepare/finish helper functions. If q->lock were NULL,
+ * then this driver would have to provide these ops.
+ */
+static const struct vb2_ops skel_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * Required ioctl querycap. Note that the version field is prefilled with
+ * the version of the kernel.
+ */
+static int skeleton_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, "V4L2 PCI Skeleton", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(skel->pdev));
+ return 0;
+}
+
+/*
+ * Helper function to check and correct struct v4l2_pix_format. It's used
+ * not only in VIDIOC_TRY/S_FMT, but also elsewhere if changes to the SDTV
+ * standard, HDTV timings or the video input would require updating the
+ * current format.
+ */
+static void skeleton_fill_pix_format(struct skeleton *skel,
+ struct v4l2_pix_format *pix)
+{
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ if (skel->input == 0) {
+ /* S-Video input */
+ pix->width = 720;
+ pix->height = (skel->std & V4L2_STD_525_60) ? 480 : 576;
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ } else {
+ /* HDMI input */
+ pix->width = skel->timings.bt.width;
+ pix->height = skel->timings.bt.height;
+ if (skel->timings.bt.interlaced) {
+ pix->field = V4L2_FIELD_ALTERNATE;
+ pix->height /= 2;
+ } else {
+ pix->field = V4L2_FIELD_NONE;
+ }
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ }
+
+ /*
+ * The YUYV format is four bytes for every two pixels, so bytesperline
+ * is width * 2.
+ */
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+}
+
+static int skeleton_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct skeleton *skel = video_drvdata(file);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+
+ /*
+ * Due to historical reasons providing try_fmt with an unsupported
+ * pixelformat will return -EINVAL for video receivers. Webcam drivers,
+ * however, will silently correct the pixelformat. Some video capture
+ * applications rely on this behavior...
+ */
+ if (pix->pixelformat != V4L2_PIX_FMT_YUYV)
+ return -EINVAL;
+ skeleton_fill_pix_format(skel, pix);
+ return 0;
+}
+
+static int skeleton_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct skeleton *skel = video_drvdata(file);
+ int ret;
+
+ ret = skeleton_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ /*
+ * It is not allowed to change the format while buffers for use with
+ * streaming have already been allocated.
+ */
+ if (vb2_is_busy(&skel->queue))
+ return -EBUSY;
+
+ /* TODO: change format */
+ skel->format = f->fmt.pix;
+ return 0;
+}
+
+static int skeleton_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ f->fmt.pix = skel->format;
+ return 0;
+}
+
+static int skeleton_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index != 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_YUYV;
+ return 0;
+}
+
+static int skeleton_s_std(struct file *file, void *priv, v4l2_std_id std)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* S_STD is not supported on the HDMI input */
+ if (skel->input)
+ return -ENODATA;
+
+ /*
+ * No change, so just return. Some applications call S_STD again after
+ * the buffers for streaming have been set up, so we have to allow for
+ * this behavior.
+ */
+ if (std == skel->std)
+ return 0;
+
+ /*
+ * Changing the standard implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&skel->queue))
+ return -EBUSY;
+
+ /* TODO: handle changing std */
+
+ skel->std = std;
+
+ /* Update the internal format */
+ skeleton_fill_pix_format(skel, &skel->format);
+ return 0;
+}
+
+static int skeleton_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* G_STD is not supported on the HDMI input */
+ if (skel->input)
+ return -ENODATA;
+
+ *std = skel->std;
+ return 0;
+}
+
+/*
+ * Query the current standard as seen by the hardware. This function shall
+ * never actually change the standard; it just detects and reports.
+ * The framework will initially set *std to tvnorms (i.e. the set of
+ * supported standards by this input), and this function should just AND
+ * this value. If there is no signal, then *std should be set to 0.
+ */
+static int skeleton_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* QUERY_STD is not supported on the HDMI input */
+ if (skel->input)
+ return -ENODATA;
+
+#ifdef TODO
+ /*
+ * Query currently seen standard. Initial value of *std is
+ * V4L2_STD_ALL. This function should look something like this:
+ */
+ get_signal_info();
+ if (no_signal) {
+ *std = 0;
+ return 0;
+ }
+ /* Use signal information to reduce the number of possible standards */
+ if (signal_has_525_lines)
+ *std &= V4L2_STD_525_60;
+ else
+ *std &= V4L2_STD_625_50;
+#endif
+ return 0;
+}
+
+static int skeleton_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* S_DV_TIMINGS is not supported on the S-Video input */
+ if (skel->input == 0)
+ return -ENODATA;
+
+ /* Quick sanity check */
+ if (!v4l2_valid_dv_timings(timings, &skel_timings_cap, NULL, NULL))
+ return -EINVAL;
+
+ /* Check if the timings are part of the CEA-861 timings. */
+ if (!v4l2_find_dv_timings_cap(timings, &skel_timings_cap,
+ 0, NULL, NULL))
+ return -EINVAL;
+
+ /* Return 0 if the new timings are the same as the current timings. */
+ if (v4l2_match_dv_timings(timings, &skel->timings, 0, false))
+ return 0;
+
+ /*
+ * Changing the timings implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&skel->queue))
+ return -EBUSY;
+
+ /* TODO: Configure new timings */
+
+ /* Save timings */
+ skel->timings = *timings;
+
+ /* Update the internal format */
+ skeleton_fill_pix_format(skel, &skel->format);
+ return 0;
+}
+
+static int skeleton_g_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* G_DV_TIMINGS is not supported on the S-Video input */
+ if (skel->input == 0)
+ return -ENODATA;
+
+ *timings = skel->timings;
+ return 0;
+}
+
+static int skeleton_enum_dv_timings(struct file *file, void *_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* ENUM_DV_TIMINGS is not supported on the S-Video input */
+ if (skel->input == 0)
+ return -ENODATA;
+
+ return v4l2_enum_dv_timings_cap(timings, &skel_timings_cap,
+ NULL, NULL);
+}
+
+/*
+ * Query the current timings as seen by the hardware. This function shall
+ * never actually change the timings; it just detects and reports.
+ * If no signal is detected, then return -ENOLINK. If the hardware cannot
+ * lock to the signal, then return -ENOLCK. If the signal is out of range
+ * of the capabilities of the system (e.g., it is possible that the receiver
+ * can lock but that the DMA engine it is connected to cannot handle
+ * pixelclocks above a certain frequency), then -ERANGE is returned.
+ */
+static int skeleton_query_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* QUERY_DV_TIMINGS is not supported on the S-Video input */
+ if (skel->input == 0)
+ return -ENODATA;
+
+#ifdef TODO
+ /*
+ * Query currently seen timings. This function should look
+ * something like this:
+ */
+ detect_timings();
+ if (no_signal)
+ return -ENOLINK;
+ if (cannot_lock_to_signal)
+ return -ENOLCK;
+ if (signal_out_of_range_of_capabilities)
+ return -ERANGE;
+
+ /* Useful for debugging */
+ v4l2_print_dv_timings(skel->v4l2_dev.name, "query_dv_timings:",
+ timings, true);
+#endif
+ return 0;
+}
+
+static int skeleton_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ /* DV_TIMINGS_CAP is not supported on the S-Video input */
+ if (skel->input == 0)
+ return -ENODATA;
+ *cap = skel_timings_cap;
+ return 0;
+}
+
+static int skeleton_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index > 1)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ if (i->index == 0) {
+ i->std = SKEL_TVNORMS;
+ strlcpy(i->name, "S-Video", sizeof(i->name));
+ i->capabilities = V4L2_IN_CAP_STD;
+ } else {
+ i->std = 0;
+ strlcpy(i->name, "HDMI", sizeof(i->name));
+ i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ }
+ return 0;
+}
+
+static int skeleton_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ if (i > 1)
+ return -EINVAL;
+
+ /*
+ * Changing the input implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&skel->queue))
+ return -EBUSY;
+
+ skel->input = i;
+ /*
+ * Update tvnorms. The tvnorms value is used by the core to implement
+ * VIDIOC_ENUMSTD so it has to be correct. If tvnorms == 0, then
+ * ENUMSTD will return -ENODATA.
+ */
+ skel->vdev.tvnorms = i ? 0 : SKEL_TVNORMS;
+
+ /* Update the internal format */
+ skeleton_fill_pix_format(skel, &skel->format);
+ return 0;
+}
+
+static int skeleton_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct skeleton *skel = video_drvdata(file);
+
+ *i = skel->input;
+ return 0;
+}
+
+/*
+ * The control handler. The v4l2 control framework calls s_ctrl with the
+ * new, already validated value in ctrl->val whenever a control changes.
+ */
+static int skeleton_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ /*struct skeleton *skel =
+ container_of(ctrl->handler, struct skeleton, ctrl_handler);*/
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ /* TODO: set brightness to ctrl->val */
+ break;
+ case V4L2_CID_CONTRAST:
+ /* TODO: set contrast to ctrl->val */
+ break;
+ case V4L2_CID_SATURATION:
+ /* TODO: set saturation to ctrl->val */
+ break;
+ case V4L2_CID_HUE:
+ /* TODO: set hue to ctrl->val */
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ File operations for the device
+ ------------------------------------------------------------------*/
+
+static const struct v4l2_ctrl_ops skel_ctrl_ops = {
+ .s_ctrl = skeleton_s_ctrl,
+};
+
+/*
+ * The set of all supported ioctls. Note that all the streaming ioctls
+ * use the vb2 helper functions that take care of all the locking and
+ * that also do ownership tracking (i.e. only the filehandle that requested
+ * the buffers can call the streaming ioctls, all other filehandles will
+ * receive -EBUSY if they attempt to call the same streaming ioctls).
+ *
+ * The last three ioctls also use standard helper functions: these implement
+ * standard behavior for drivers with controls.
+ */
+static const struct v4l2_ioctl_ops skel_ioctl_ops = {
+ .vidioc_querycap = skeleton_querycap,
+ .vidioc_try_fmt_vid_cap = skeleton_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = skeleton_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = skeleton_g_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = skeleton_enum_fmt_vid_cap,
+
+ .vidioc_g_std = skeleton_g_std,
+ .vidioc_s_std = skeleton_s_std,
+ .vidioc_querystd = skeleton_querystd,
+
+ .vidioc_s_dv_timings = skeleton_s_dv_timings,
+ .vidioc_g_dv_timings = skeleton_g_dv_timings,
+ .vidioc_enum_dv_timings = skeleton_enum_dv_timings,
+ .vidioc_query_dv_timings = skeleton_query_dv_timings,
+ .vidioc_dv_timings_cap = skeleton_dv_timings_cap,
+
+ .vidioc_enum_input = skeleton_enum_input,
+ .vidioc_g_input = skeleton_g_input,
+ .vidioc_s_input = skeleton_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
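+
+/*
+ * For reference, a minimal user-space capture sequence served by the
+ * vb2 helpers above looks roughly like this (a sketch: error handling
+ * is omitted and the device path is just an example):
+ *
+ *	int fd = open("/dev/video0", O_RDWR);
+ *	struct v4l2_requestbuffers req = {
+ *		.count = 4,
+ *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *		.memory = V4L2_MEMORY_MMAP,
+ *	};
+ *
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);
+ *	(VIDIOC_QUERYBUF and mmap() each buffer, then VIDIOC_QBUF them)
+ *	ioctl(fd, VIDIOC_STREAMON, &req.type);
+ *	(VIDIOC_DQBUF / process / VIDIOC_QBUF loop, then VIDIOC_STREAMOFF)
+ *
+ * Only the file handle that called VIDIOC_REQBUFS may issue the
+ * streaming ioctls; any other handle gets -EBUSY, which is the
+ * ownership tracking mentioned above.
+ */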
+
+/*
+ * The set of file operations. Note that all these ops are standard core
+ * helper functions.
+ */
+static const struct v4l2_file_operations skel_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+/*
+ * The initial setup of this device instance. Note that the initial state of
+ * the driver should be complete. So the initial format, standard, timings
+ * and video input should all be initialized to some reasonable value.
+ */
+static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ /* The initial timings are chosen to be 720p60. */
+ static const struct v4l2_dv_timings timings_def =
+ V4L2_DV_BT_CEA_1280X720P60;
+ struct skeleton *skel;
+ struct video_device *vdev;
+ struct v4l2_ctrl_handler *hdl;
+ struct vb2_queue *q;
+ int ret;
+
+ /* Enable PCI */
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "no suitable DMA available.\n");
+ goto disable_pci;
+ }
+
+ /* Allocate a new instance */
+ skel = devm_kzalloc(&pdev->dev, sizeof(struct skeleton), GFP_KERNEL);
+ if (!skel) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
+
+ /* Allocate the interrupt */
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ skeleton_irq, 0, KBUILD_MODNAME, skel);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto disable_pci;
+ }
+ skel->pdev = pdev;
+
+ /* Fill in the initial format-related settings */
+ skel->timings = timings_def;
+ skel->std = V4L2_STD_625_50;
+ skeleton_fill_pix_format(skel, &skel->format);
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &skel->v4l2_dev);
+ if (ret)
+ goto disable_pci;
+
+ mutex_init(&skel->lock);
+
+ /* Add the controls */
+ hdl = &skel->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 16);
+ v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &skel_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_hdl;
+ }
+ skel->v4l2_dev.ctrl_handler = hdl;
+
+ /* Initialize the vb2 queue */
+ q = &skel->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->dev = &pdev->dev;
+ q->drv_priv = skel;
+ q->buf_struct_size = sizeof(struct skel_buffer);
+ q->ops = &skel_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ /*
+ * Assume that this DMA engine needs to have at least two buffers
+ * available before it can be started. The start_streaming() op
+ * won't be called until at least this many buffers are queued up.
+ */
+ q->min_buffers_needed = 2;
+ /*
+ * The serialization lock for the streaming ioctls. This is the same
+ * as the main serialization lock, but if some of the non-streaming
+ * ioctls could take a long time to execute, then you might want to
+ * have a different lock here to prevent VIDIOC_DQBUF from being
+ * blocked while waiting for another action to finish. This is
+ * generally not needed for PCI devices, but USB devices usually do
+ * want a separate lock here.
+ */
+ q->lock = &skel->lock;
+ /*
+ * Since this driver can only do 32-bit DMA we must make sure that
+ * the vb2 core will allocate the buffers in 32-bit DMA memory.
+ */
+ q->gfp_flags = GFP_DMA32;
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto free_hdl;
+
+ INIT_LIST_HEAD(&skel->buf_list);
+ spin_lock_init(&skel->qlock);
+
+ /* Initialize the video_device structure */
+ vdev = &skel->vdev;
+ strlcpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
+ /*
+ * There is nothing to clean up, so release is set to an empty release
+ * function. The release callback must be non-NULL.
+ */
+ vdev->release = video_device_release_empty;
+	vdev->fops = &skel_fops;
+	vdev->ioctl_ops = &skel_ioctl_ops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ /*
+ * The main serialization lock. All ioctls are serialized by this
+ * lock. Exception: if q->lock is set, then the streaming ioctls
+ * are serialized by that separate lock.
+ */
+ vdev->lock = &skel->lock;
+ vdev->queue = q;
+ vdev->v4l2_dev = &skel->v4l2_dev;
+ /* Supported SDTV standards, if any */
+ vdev->tvnorms = SKEL_TVNORMS;
+ video_set_drvdata(vdev, skel);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ goto free_hdl;
+
+ dev_info(&pdev->dev, "V4L2 PCI Skeleton Driver loaded\n");
+ return 0;
+
+free_hdl:
+ v4l2_ctrl_handler_free(&skel->ctrl_handler);
+ v4l2_device_unregister(&skel->v4l2_dev);
+disable_pci:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void skeleton_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct skeleton *skel = container_of(v4l2_dev, struct skeleton, v4l2_dev);
+
+ video_unregister_device(&skel->vdev);
+ v4l2_ctrl_handler_free(&skel->ctrl_handler);
+ v4l2_device_unregister(&skel->v4l2_dev);
+ pci_disable_device(skel->pdev);
+}
+
+static struct pci_driver skeleton_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = skeleton_probe,
+ .remove = skeleton_remove,
+ .id_table = skeleton_pci_tbl,
+};
+
+module_pci_driver(skeleton_driver);
diff --git a/samples/vfio-mdev/Makefile b/samples/vfio-mdev/Makefile
new file mode 100644
index 000000000..10d179c4f
--- /dev/null
+++ b/samples/vfio-mdev/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MTTY) += mtty.o
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MDPY) += mdpy.o
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB) += mdpy-fb.o
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MBOCHS) += mbochs.o
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
new file mode 100644
index 000000000..117a8d799
--- /dev/null
+++ b/samples/vfio-mdev/mbochs.c
@@ -0,0 +1,1471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mediated virtual PCI display host device driver
+ *
+ * Emulate enough of qemu stdvga to make bochs-drm.ko happy. That is
+ * basically the vram memory bar and the bochs dispi interface vbe
+ * registers in the mmio register bar. Specifically it does *not*
+ * include any legacy vga stuff. Device looks a lot like "qemu -device
+ * secondary-vga".
+ *
+ * (c) Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * based on mtty driver which is:
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/vfio.h>
+#include <linux/iommu.h>
+#include <linux/sysfs.h>
+#include <linux/mdev.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_modeset_lock.h>
+#include <drm/drm_property.h>
+#include <drm/drm_plane.h>
+
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
+#define VBE_DISPI_INDEX_COUNT 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+#define VBE_DISPI_ID5 0xB0C5
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+
+#define MBOCHS_NAME "mbochs"
+#define MBOCHS_CLASS_NAME "mbochs"
+
+#define MBOCHS_EDID_REGION_INDEX VFIO_PCI_NUM_REGIONS
+#define MBOCHS_NUM_REGIONS (MBOCHS_EDID_REGION_INDEX+1)
+
+#define MBOCHS_CONFIG_SPACE_SIZE 0xff
+#define MBOCHS_MMIO_BAR_OFFSET PAGE_SIZE
+#define MBOCHS_MMIO_BAR_SIZE PAGE_SIZE
+#define MBOCHS_EDID_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
+ MBOCHS_MMIO_BAR_SIZE)
+#define MBOCHS_EDID_SIZE PAGE_SIZE
+#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_EDID_OFFSET + \
+ MBOCHS_EDID_SIZE)
+
+#define MBOCHS_EDID_BLOB_OFFSET (MBOCHS_EDID_SIZE/2)
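+
+/*
+ * Resulting layout of the mdev file offset space (a sketch; the byte
+ * values assume PAGE_SIZE == 4096):
+ *
+ *	0x0000	PCI config space  (MBOCHS_CONFIG_SPACE_SIZE bytes)
+ *	0x1000	MMIO bar          (bochs dispi interface registers)
+ *	0x2000	EDID region       (registers first, blob at 0x2800)
+ *	0x3000	video memory bar  (mdev_state->memsize bytes)
+ */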
+
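+/*
+ * Note: these helpers do an unconditional native-endian store, so they
+ * are only correct on little-endian hosts; PCI config space and the
+ * dispi registers are defined little endian.
+ */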
+#define STORE_LE16(addr, val) (*(u16 *)addr = val)
+#define STORE_LE32(addr, val) (*(u32 *)addr = val)
+
+
+MODULE_LICENSE("GPL v2");
+
+static int max_mbytes = 256;
+module_param_named(count, max_mbytes, int, 0444);
+MODULE_PARM_DESC(count, "megabytes available to " MBOCHS_NAME " devices");
+
+
+#define MBOCHS_TYPE_1 "small"
+#define MBOCHS_TYPE_2 "medium"
+#define MBOCHS_TYPE_3 "large"
+
+static struct mbochs_type {
+ struct mdev_type type;
+ u32 mbytes;
+ u32 max_x;
+ u32 max_y;
+} mbochs_types[] = {
+ {
+ .type.sysfs_name = MBOCHS_TYPE_1,
+ .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
+ .mbytes = 4,
+ .max_x = 800,
+ .max_y = 600,
+ }, {
+ .type.sysfs_name = MBOCHS_TYPE_2,
+ .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
+ .mbytes = 16,
+ .max_x = 1920,
+ .max_y = 1440,
+ }, {
+ .type.sysfs_name = MBOCHS_TYPE_3,
+ .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
+ .mbytes = 64,
+ .max_x = 0,
+ .max_y = 0,
+ },
+};
+
+static struct mdev_type *mbochs_mdev_types[] = {
+ &mbochs_types[0].type,
+ &mbochs_types[1].type,
+ &mbochs_types[2].type,
+};
+
+static dev_t mbochs_devt;
+static struct class *mbochs_class;
+static struct cdev mbochs_cdev;
+static struct device mbochs_dev;
+static struct mdev_parent mbochs_parent;
+static atomic_t mbochs_avail_mbytes;
+static const struct vfio_device_ops mbochs_dev_ops;
+
+struct vfio_region_info_ext {
+ struct vfio_region_info base;
+ struct vfio_region_info_cap_type type;
+};
+
+struct mbochs_mode {
+ u32 drm_format;
+ u32 bytepp;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 __pad;
+ u64 offset;
+ u64 size;
+};
+
+struct mbochs_dmabuf {
+ struct mbochs_mode mode;
+ u32 id;
+ struct page **pages;
+ pgoff_t pagecount;
+ struct dma_buf *buf;
+ struct mdev_state *mdev_state;
+ struct list_head next;
+ bool unlinked;
+};
+
+/* State of each mdev device */
+struct mdev_state {
+ struct vfio_device vdev;
+ u8 *vconfig;
+ u64 bar_mask[3];
+ u32 memory_bar_mask;
+ struct mutex ops_lock;
+ struct mdev_device *mdev;
+
+ const struct mbochs_type *type;
+ u16 vbe[VBE_DISPI_INDEX_COUNT];
+ u64 memsize;
+ struct page **pages;
+ pgoff_t pagecount;
+ struct vfio_region_gfx_edid edid_regs;
+ u8 edid_blob[0x400];
+
+ struct list_head dmabufs;
+ u32 active_id;
+ u32 next_id;
+};
+
+static const char *vbe_name_list[VBE_DISPI_INDEX_COUNT] = {
+ [VBE_DISPI_INDEX_ID] = "id",
+ [VBE_DISPI_INDEX_XRES] = "xres",
+ [VBE_DISPI_INDEX_YRES] = "yres",
+ [VBE_DISPI_INDEX_BPP] = "bpp",
+ [VBE_DISPI_INDEX_ENABLE] = "enable",
+ [VBE_DISPI_INDEX_BANK] = "bank",
+ [VBE_DISPI_INDEX_VIRT_WIDTH] = "virt-width",
+ [VBE_DISPI_INDEX_VIRT_HEIGHT] = "virt-height",
+ [VBE_DISPI_INDEX_X_OFFSET] = "x-offset",
+ [VBE_DISPI_INDEX_Y_OFFSET] = "y-offset",
+ [VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = "video-mem",
+};
+
+static const char *vbe_name(u32 index)
+{
+ if (index < ARRAY_SIZE(vbe_name_list))
+ return vbe_name_list[index];
+ return "(invalid)";
+}
+
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+ pgoff_t pgoff);
+static struct page *mbochs_get_page(struct mdev_state *mdev_state,
+ pgoff_t pgoff);
+
+static void mbochs_create_config_space(struct mdev_state *mdev_state)
+{
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
+ 0x1234);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
+ 0x1111);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
+ PCI_SUBDEVICE_ID_QEMU);
+
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
+ PCI_CLASS_DISPLAY_OTHER);
+ mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
+
+ STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_32 |
+ PCI_BASE_ADDRESS_MEM_PREFETCH);
+ mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;
+
+ STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_32);
+ mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
+}
+
+static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
+ struct mbochs_mode *mode)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ u16 *vbe = mdev_state->vbe;
+ u32 virt_width;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
+ goto nofb;
+
+ memset(mode, 0, sizeof(*mode));
+ switch (vbe[VBE_DISPI_INDEX_BPP]) {
+ case 32:
+ mode->drm_format = DRM_FORMAT_XRGB8888;
+ mode->bytepp = 4;
+ break;
+ default:
+ dev_info_ratelimited(dev, "%s: bpp %d not supported\n",
+ __func__, vbe[VBE_DISPI_INDEX_BPP]);
+ goto nofb;
+ }
+
+ mode->width = vbe[VBE_DISPI_INDEX_XRES];
+ mode->height = vbe[VBE_DISPI_INDEX_YRES];
+ virt_width = vbe[VBE_DISPI_INDEX_VIRT_WIDTH];
+ if (virt_width < mode->width)
+ virt_width = mode->width;
+ mode->stride = virt_width * mode->bytepp;
+ mode->size = (u64)mode->stride * mode->height;
+ mode->offset = ((u64)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp +
+ (u64)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride);
+
+ if (mode->width < 64 || mode->height < 64) {
+ dev_info_ratelimited(dev, "%s: invalid resolution %dx%d\n",
+ __func__, mode->width, mode->height);
+ goto nofb;
+ }
+ if (mode->offset + mode->size > mdev_state->memsize) {
+ dev_info_ratelimited(dev, "%s: framebuffer memory overflow\n",
+ __func__);
+ goto nofb;
+ }
+
+ return 0;
+
+nofb:
+ memset(mode, 0, sizeof(*mode));
+ return -EINVAL;
+}
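+
+/*
+ * Worked example: a guest that programs XRES=1024, YRES=768, BPP=32 and
+ * leaves X/Y_OFFSET at 0 (VIRT_WIDTH <= 1024) gets stride = 4096 bytes
+ * and size = 3 MB, so the overflow check above passes on any type with
+ * at least 4 MB of video memory.
+ */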
+
+static bool mbochs_modes_equal(struct mbochs_mode *mode1,
+ struct mbochs_mode *mode2)
+{
+ return memcmp(mode1, mode2, sizeof(struct mbochs_mode)) == 0;
+}
+
+static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ int index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
+ u32 cfg_addr;
+
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_2:
+ cfg_addr = *(u32 *)buf;
+
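+		/*
+		 * A write of all ones is the standard PCI BAR sizing
+		 * probe: answering with the size mask (~size + 1, as
+		 * precomputed in bar_mask[]) tells the guest how much
+		 * address space this BAR decodes.
+		 */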
+ if (cfg_addr == 0xffffffff) {
+ cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
+ } else {
+ cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+ if (cfg_addr)
+ dev_info(dev, "BAR #%d @ 0x%x\n",
+ index, cfg_addr);
+ }
+
+ cfg_addr |= (mdev_state->vconfig[offset] &
+ ~PCI_BASE_ADDRESS_MEM_MASK);
+ STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
+ break;
+ }
+}
+
+static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ int index;
+ u16 reg16;
+
+ switch (offset) {
+ case 0x400 ... 0x41f: /* vga ioports remapped */
+ goto unhandled;
+ case 0x500 ... 0x515: /* bochs dispi interface */
+ if (count != 2)
+ goto unhandled;
+ index = (offset - 0x500) / 2;
+ reg16 = *(u16 *)buf;
+ if (index < ARRAY_SIZE(mdev_state->vbe))
+ mdev_state->vbe[index] = reg16;
+ dev_dbg(dev, "%s: vbe write %d = %d (%s)\n",
+ __func__, index, reg16, vbe_name(index));
+ break;
+ case 0x600 ... 0x607: /* qemu extended regs */
+ goto unhandled;
+ default:
+unhandled:
+ dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
+ __func__, offset, count);
+ break;
+ }
+}
+
+static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ struct vfio_region_gfx_edid *edid;
+ u16 reg16 = 0;
+ int index;
+
+ switch (offset) {
+ case 0x000 ... 0x3ff: /* edid block */
+ edid = &mdev_state->edid_regs;
+ if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP ||
+ offset >= edid->edid_size) {
+ memset(buf, 0, count);
+ break;
+ }
+ memcpy(buf, mdev_state->edid_blob + offset, count);
+ break;
+ case 0x500 ... 0x515: /* bochs dispi interface */
+ if (count != 2)
+ goto unhandled;
+ index = (offset - 0x500) / 2;
+ if (index < ARRAY_SIZE(mdev_state->vbe))
+ reg16 = mdev_state->vbe[index];
+ dev_dbg(dev, "%s: vbe read %d = %d (%s)\n",
+ __func__, index, reg16, vbe_name(index));
+ *(u16 *)buf = reg16;
+ break;
+ default:
+unhandled:
+ dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
+ __func__, offset, count);
+ memset(buf, 0, count);
+ break;
+ }
+}
+
+static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count, bool is_write)
+{
+ char *regs = (void *)&mdev_state->edid_regs;
+
+ if (offset + count > sizeof(mdev_state->edid_regs))
+ return;
+ if (count != 4)
+ return;
+ if (offset % 4)
+ return;
+
+ if (is_write) {
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ memcpy(regs + offset, buf, count);
+ break;
+ default:
+ /* read-only regs */
+ break;
+ }
+ } else {
+ memcpy(buf, regs + offset, count);
+ }
+}
+
+static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count, bool is_write)
+{
+ if (offset + count > mdev_state->edid_regs.edid_max_size)
+ return;
+ if (is_write)
+ memcpy(mdev_state->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, mdev_state->edid_blob + offset, count);
+}
+
+static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
+ size_t count, loff_t pos, bool is_write)
+{
+ struct page *pg;
+ loff_t poff;
+ char *map;
+ int ret = 0;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
+ if (is_write)
+ handle_pci_cfg_write(mdev_state, pos, buf, count);
+ else
+ memcpy(buf, (mdev_state->vconfig + pos), count);
+
+ } else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
+ pos + count <= (MBOCHS_MMIO_BAR_OFFSET +
+ MBOCHS_MMIO_BAR_SIZE)) {
+ pos -= MBOCHS_MMIO_BAR_OFFSET;
+ if (is_write)
+ handle_mmio_write(mdev_state, pos, buf, count);
+ else
+ handle_mmio_read(mdev_state, pos, buf, count);
+
+ } else if (pos >= MBOCHS_EDID_OFFSET &&
+ pos + count <= (MBOCHS_EDID_OFFSET +
+ MBOCHS_EDID_SIZE)) {
+ pos -= MBOCHS_EDID_OFFSET;
+ if (pos < MBOCHS_EDID_BLOB_OFFSET) {
+ handle_edid_regs(mdev_state, pos, buf, count, is_write);
+ } else {
+ pos -= MBOCHS_EDID_BLOB_OFFSET;
+ handle_edid_blob(mdev_state, pos, buf, count, is_write);
+ }
+
+ } else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
+ pos + count <=
+ MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
+		pos -= MBOCHS_MEMORY_BAR_OFFSET;
+ poff = pos & ~PAGE_MASK;
+ pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
+ map = kmap(pg);
+ if (is_write)
+ memcpy(map + poff, buf, count);
+ else
+ memcpy(buf, map + poff, count);
+ kunmap(pg);
+ put_page(pg);
+
+ } else {
+ dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
+ __func__, is_write ? "WR" : "RD", pos);
+ ret = -1;
+ goto accessfailed;
+ }
+
+ ret = count;
+
+accessfailed:
+ mutex_unlock(&mdev_state->ops_lock);
+
+ return ret;
+}
+
+static int mbochs_reset(struct mdev_state *mdev_state)
+{
+ u32 size64k = mdev_state->memsize / (64 * 1024);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
+ mdev_state->vbe[i] = 0;
+ mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
+ mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
+ return 0;
+}
+
+static int mbochs_init_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ struct mdev_device *mdev = to_mdev_device(vdev->dev);
+ struct mbochs_type *type =
+ container_of(mdev->type, struct mbochs_type, type);
+ int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
+ int ret = -ENOMEM;
+
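+	/*
+	 * Carve type->mbytes out of the global pool without taking a
+	 * lock: on failure atomic_try_cmpxchg() updates avail_mbytes to
+	 * the current pool value, so the loop retries until the
+	 * subtraction applies cleanly or the pool is exhausted.
+	 */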
+ do {
+ if (avail_mbytes < type->mbytes)
+ return -ENOSPC;
+ } while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
+ avail_mbytes - type->mbytes));
+
+ mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
+ if (!mdev_state->vconfig)
+ goto err_avail;
+
+ mdev_state->memsize = type->mbytes * 1024 * 1024;
+ mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
+ mdev_state->pages = kcalloc(mdev_state->pagecount,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!mdev_state->pages)
+ goto err_vconfig;
+
+ mutex_init(&mdev_state->ops_lock);
+ mdev_state->mdev = mdev;
+ INIT_LIST_HEAD(&mdev_state->dmabufs);
+ mdev_state->next_id = 1;
+
+ mdev_state->type = type;
+ mdev_state->edid_regs.max_xres = type->max_x;
+ mdev_state->edid_regs.max_yres = type->max_y;
+ mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
+ mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
+ mbochs_create_config_space(mdev_state);
+ mbochs_reset(mdev_state);
+
+ dev_info(vdev->dev, "%s: %s, %d MB, %ld pages\n", __func__,
+ type->type.pretty_name, type->mbytes, mdev_state->pagecount);
+ return 0;
+
+err_vconfig:
+ kfree(mdev_state->vconfig);
+err_avail:
+ atomic_add(type->mbytes, &mbochs_avail_mbytes);
+ return ret;
+}
+
+static int mbochs_probe(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state;
+ int ret = -ENOMEM;
+
+ mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
+ &mbochs_dev_ops);
+ if (IS_ERR(mdev_state))
+ return PTR_ERR(mdev_state);
+
+ ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
+ if (ret)
+ goto err_put_vdev;
+ dev_set_drvdata(&mdev->dev, mdev_state);
+ return 0;
+
+err_put_vdev:
+ vfio_put_device(&mdev_state->vdev);
+ return ret;
+}
+
+static void mbochs_release_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
+ kfree(mdev_state->pages);
+ kfree(mdev_state->vconfig);
+ vfio_free_device(vdev);
+}
+
+static void mbochs_remove(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
+
+ vfio_unregister_group_dev(&mdev_state->vdev);
+ vfio_put_device(&mdev_state->vdev);
+}
+
+static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
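+	/*
+	 * Split the transfer into naturally aligned 4-, 2- or 1-byte
+	 * chunks so that mdev_access() only ever sees register-sized,
+	 * aligned transactions.
+	 */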
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t mbochs_write(struct vfio_device *vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
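+	/* Same size/alignment chunking as in mbochs_read() above. */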
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+ pgoff_t pgoff)
+{
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
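+	/* Video memory is allocated on first access, one page at a time. */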
+ if (!mdev_state->pages[pgoff]) {
+ mdev_state->pages[pgoff] =
+ alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
+ if (!mdev_state->pages[pgoff])
+ return NULL;
+ }
+
+ get_page(mdev_state->pages[pgoff]);
+ return mdev_state->pages[pgoff];
+}
+
+static struct page *mbochs_get_page(struct mdev_state *mdev_state,
+ pgoff_t pgoff)
+{
+ struct page *page;
+
+ if (WARN_ON(pgoff >= mdev_state->pagecount))
+ return NULL;
+
+ mutex_lock(&mdev_state->ops_lock);
+ page = __mbochs_get_page(mdev_state, pgoff);
+ mutex_unlock(&mdev_state->ops_lock);
+
+ return page;
+}
+
+static void mbochs_put_pages(struct mdev_state *mdev_state)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ int i, count = 0;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ for (i = 0; i < mdev_state->pagecount; i++) {
+ if (!mdev_state->pages[i])
+ continue;
+ put_page(mdev_state->pages[i]);
+ mdev_state->pages[i] = NULL;
+ count++;
+ }
+ dev_dbg(dev, "%s: %d pages released\n", __func__, count);
+}
+
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct mdev_state *mdev_state = vma->vm_private_data;
+ pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ if (page_offset >= mdev_state->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = mbochs_get_page(mdev_state, page_offset);
+ if (!vmf->page)
+ return VM_FAULT_SIGBUS;
+
+ return 0;
+}
+
+static const struct vm_operations_struct mbochs_region_vm_ops = {
+ .fault = mbochs_region_vm_fault,
+};
+
+static int mbochs_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
+ return -EINVAL;
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if (vma->vm_end - vma->vm_start > mdev_state->memsize)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &mbochs_region_vm_ops;
+ vma->vm_private_data = mdev_state;
+ return 0;
+}
+
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
+
+ if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = dmabuf->pages[vmf->pgoff];
+ get_page(vmf->page);
+ return 0;
+}
+
+static const struct vm_operations_struct mbochs_dmabuf_vm_ops = {
+ .fault = mbochs_dmabuf_vm_fault,
+};
+
+static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct mbochs_dmabuf *dmabuf = buf->priv;
+ struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
+
+ dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &mbochs_dmabuf_vm_ops;
+ vma->vm_private_data = dmabuf;
+ return 0;
+}
+
+static void mbochs_print_dmabuf(struct mbochs_dmabuf *dmabuf,
+ const char *prefix)
+{
+ struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
+ u32 fourcc = dmabuf->mode.drm_format;
+
+ dev_dbg(dev, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
+ prefix, dmabuf->id,
+ fourcc ? ((fourcc >> 0) & 0xff) : '-',
+ fourcc ? ((fourcc >> 8) & 0xff) : '-',
+ fourcc ? ((fourcc >> 16) & 0xff) : '-',
+ fourcc ? ((fourcc >> 24) & 0xff) : '-',
+ dmabuf->mode.width, dmabuf->mode.height, dmabuf->mode.stride,
+ dmabuf->mode.offset, dmabuf->mode.size, dmabuf->pagecount);
+}
+
+static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
+ struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
+ struct sg_table *sg;
+
+ dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ goto err1;
+ if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
+ 0, dmabuf->mode.size, GFP_KERNEL) < 0)
+ goto err2;
+ if (dma_map_sgtable(at->dev, sg, direction, 0))
+ goto err3;
+
+ return sg;
+
+err3:
+ sg_free_table(sg);
+err2:
+ kfree(sg);
+err1:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
+ struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
+
+ dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
+
+ dma_unmap_sgtable(at->dev, sg, direction, 0);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void mbochs_release_dmabuf(struct dma_buf *buf)
+{
+ struct mbochs_dmabuf *dmabuf = buf->priv;
+ struct mdev_state *mdev_state = dmabuf->mdev_state;
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ pgoff_t pg;
+
+ dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
+
+ for (pg = 0; pg < dmabuf->pagecount; pg++)
+ put_page(dmabuf->pages[pg]);
+
+ mutex_lock(&mdev_state->ops_lock);
+ dmabuf->buf = NULL;
+ if (dmabuf->unlinked)
+ kfree(dmabuf);
+ mutex_unlock(&mdev_state->ops_lock);
+}
+
+static struct dma_buf_ops mbochs_dmabuf_ops = {
+ .map_dma_buf = mbochs_map_dmabuf,
+ .unmap_dma_buf = mbochs_unmap_dmabuf,
+ .release = mbochs_release_dmabuf,
+ .mmap = mbochs_mmap_dmabuf,
+};
+
+static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
+ struct mbochs_mode *mode)
+{
+ struct mbochs_dmabuf *dmabuf;
+ pgoff_t page_offset, pg;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
+ if (!dmabuf)
+ return NULL;
+
+ dmabuf->mode = *mode;
+ dmabuf->id = mdev_state->next_id++;
+ dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
+ dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!dmabuf->pages)
+ goto err_free_dmabuf;
+
+ page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
+ for (pg = 0; pg < dmabuf->pagecount; pg++) {
+ dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
+ page_offset + pg);
+ if (!dmabuf->pages[pg])
+ goto err_free_pages;
+ }
+
+ dmabuf->mdev_state = mdev_state;
+ list_add(&dmabuf->next, &mdev_state->dmabufs);
+
+ mbochs_print_dmabuf(dmabuf, __func__);
+ return dmabuf;
+
+err_free_pages:
+ while (pg > 0)
+ put_page(dmabuf->pages[--pg]);
+ kfree(dmabuf->pages);
+err_free_dmabuf:
+ kfree(dmabuf);
+ return NULL;
+}
+
+static struct mbochs_dmabuf *
+mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
+ struct mbochs_mode *mode)
+{
+ struct mbochs_dmabuf *dmabuf;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
+ if (mbochs_modes_equal(&dmabuf->mode, mode))
+ return dmabuf;
+
+ return NULL;
+}
+
+static struct mbochs_dmabuf *
+mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
+{
+ struct mbochs_dmabuf *dmabuf;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
+ if (dmabuf->id == id)
+ return dmabuf;
+
+ return NULL;
+}
+
+static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
+{
+ struct mdev_state *mdev_state = dmabuf->mdev_state;
+ struct device *dev = mdev_state->vdev.dev;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *buf;
+
+ WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));
+
+ if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
+ dev_info_ratelimited(dev, "%s: framebuffer not page-aligned\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ exp_info.ops = &mbochs_dmabuf_ops;
+ exp_info.size = dmabuf->mode.size;
+ exp_info.priv = dmabuf;
+
+ buf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf)) {
+ dev_info_ratelimited(dev, "%s: dma_buf_export failed: %ld\n",
+ __func__, PTR_ERR(buf));
+ return PTR_ERR(buf);
+ }
+
+ dmabuf->buf = buf;
+ dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
+ return 0;
+}
+
+static int mbochs_get_region_info(struct mdev_state *mdev_state,
+ struct vfio_region_info_ext *ext)
+{
+ struct vfio_region_info *region_info = &ext->base;
+
+ if (region_info->index >= MBOCHS_NUM_REGIONS)
+ return -EINVAL;
+
+ switch (region_info->index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ region_info->offset = 0;
+ region_info->size = MBOCHS_CONFIG_SPACE_SIZE;
+ region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ region_info->offset = MBOCHS_MEMORY_BAR_OFFSET;
+ region_info->size = mdev_state->memsize;
+ region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP);
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ region_info->offset = MBOCHS_MMIO_BAR_OFFSET;
+ region_info->size = MBOCHS_MMIO_BAR_SIZE;
+ region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE);
+ break;
+ case MBOCHS_EDID_REGION_INDEX:
+ ext->base.argsz = sizeof(*ext);
+ ext->base.offset = MBOCHS_EDID_OFFSET;
+ ext->base.size = MBOCHS_EDID_SIZE;
+ ext->base.flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS);
+ ext->base.cap_offset = offsetof(typeof(*ext), type);
+ ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE;
+ ext->type.header.version = 1;
+ ext->type.header.next = 0;
+ ext->type.type = VFIO_REGION_TYPE_GFX;
+ ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID;
+ break;
+ default:
+ region_info->size = 0;
+ region_info->offset = 0;
+ region_info->flags = 0;
+ }
+
+ return 0;
+}
+
+static int mbochs_get_irq_info(struct vfio_irq_info *irq_info)
+{
+ irq_info->count = 0;
+ return 0;
+}
+
+static int mbochs_get_device_info(struct vfio_device_info *dev_info)
+{
+ dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
+ dev_info->num_regions = MBOCHS_NUM_REGIONS;
+ dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
+ return 0;
+}
+
+static int mbochs_query_gfx_plane(struct mdev_state *mdev_state,
+ struct vfio_device_gfx_plane_info *plane)
+{
+ struct mbochs_dmabuf *dmabuf;
+ struct mbochs_mode mode;
+ int ret;
+
+ if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
+ if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
+ VFIO_GFX_PLANE_TYPE_DMABUF))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (plane->flags != VFIO_GFX_PLANE_TYPE_DMABUF)
+ return -EINVAL;
+
+ plane->drm_format_mod = 0;
+ plane->x_pos = 0;
+ plane->y_pos = 0;
+ plane->x_hot = 0;
+ plane->y_hot = 0;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ ret = -EINVAL;
+ if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY)
+ ret = mbochs_check_framebuffer(mdev_state, &mode);
+ if (ret < 0) {
+ plane->drm_format = 0;
+ plane->width = 0;
+ plane->height = 0;
+ plane->stride = 0;
+ plane->size = 0;
+ plane->dmabuf_id = 0;
+ goto done;
+ }
+
+ dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
+ if (!dmabuf)
+		dmabuf = mbochs_dmabuf_alloc(mdev_state, &mode);
+ if (!dmabuf) {
+ mutex_unlock(&mdev_state->ops_lock);
+ return -ENOMEM;
+ }
+
+ plane->drm_format = dmabuf->mode.drm_format;
+ plane->width = dmabuf->mode.width;
+ plane->height = dmabuf->mode.height;
+ plane->stride = dmabuf->mode.stride;
+ plane->size = dmabuf->mode.size;
+ plane->dmabuf_id = dmabuf->id;
+
+done:
+ if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
+ mdev_state->active_id != plane->dmabuf_id) {
+ dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n",
+ __func__, mdev_state->active_id, plane->dmabuf_id);
+ mdev_state->active_id = plane->dmabuf_id;
+ }
+ mutex_unlock(&mdev_state->ops_lock);
+ return 0;
+}
+
+static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
+{
+ struct mbochs_dmabuf *dmabuf;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
+ if (!dmabuf) {
+ mutex_unlock(&mdev_state->ops_lock);
+ return -ENOENT;
+ }
+
+ if (!dmabuf->buf)
+ mbochs_dmabuf_export(dmabuf);
+
+ mutex_unlock(&mdev_state->ops_lock);
+
+ if (!dmabuf->buf)
+ return -EINVAL;
+
+ return dma_buf_fd(dmabuf->buf, 0);
+}
+
+static long mbochs_ioctl(struct vfio_device *vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ int ret = 0;
+ unsigned long minsz, outsz;
+
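+	/*
+	 * Every VFIO ioctl payload begins with argsz: copy in only the
+	 * fields this driver knows about (minsz), reject buffers that
+	 * are too small, and never copy back more than the caller has
+	 * room for.
+	 */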
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = mbochs_get_device_info(&info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+ case VFIO_DEVICE_GET_REGION_INFO:
+ {
+ struct vfio_region_info_ext info;
+
+ minsz = offsetofend(typeof(info), base.offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ outsz = info.base.argsz;
+ if (outsz < minsz)
+ return -EINVAL;
+ if (outsz > sizeof(info))
+ return -EINVAL;
+
+ ret = mbochs_get_region_info(mdev_state, &info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, outsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if ((info.argsz < minsz) ||
+ (info.index >= VFIO_PCI_NUM_IRQS))
+ return -EINVAL;
+
+ ret = mbochs_get_irq_info(&info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_QUERY_GFX_PLANE:
+ {
+ struct vfio_device_gfx_plane_info plane;
+
+ minsz = offsetofend(struct vfio_device_gfx_plane_info,
+ region_index);
+
+ if (copy_from_user(&plane, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (plane.argsz < minsz)
+ return -EINVAL;
+
+ ret = mbochs_query_gfx_plane(mdev_state, &plane);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &plane, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_GET_GFX_DMABUF:
+ {
+ u32 dmabuf_id;
+
+ if (get_user(dmabuf_id, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id);
+ }
+
+ case VFIO_DEVICE_SET_IRQS:
+ return -EINVAL;
+
+ case VFIO_DEVICE_RESET:
+ return mbochs_reset(mdev_state);
+ }
+ return -ENOTTY;
+}
+
+static void mbochs_close_device(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ struct mbochs_dmabuf *dmabuf, *tmp;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
+ list_del(&dmabuf->next);
+ if (dmabuf->buf) {
+ /* free in mbochs_release_dmabuf() */
+ dmabuf->unlinked = true;
+ } else {
+ kfree(dmabuf);
+ }
+ }
+ mbochs_put_pages(mdev_state);
+
+ mutex_unlock(&mdev_state->ops_lock);
+}
+
+static ssize_t
+memory_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_state *mdev_state = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
+}
+static DEVICE_ATTR_RO(memory);
+
+static struct attribute *mdev_dev_attrs[] = {
+ &dev_attr_memory.attr,
+ NULL,
+};
+
+static const struct attribute_group mdev_dev_group = {
+ .name = "vendor",
+ .attrs = mdev_dev_attrs,
+};
+
+static const struct attribute_group *mdev_dev_groups[] = {
+ &mdev_dev_group,
+ NULL,
+};
+
+static ssize_t mbochs_show_description(struct mdev_type *mtype, char *buf)
+{
+ struct mbochs_type *type =
+ container_of(mtype, struct mbochs_type, type);
+
+ return sprintf(buf, "virtual display, %d MB video memory\n",
+ type ? type->mbytes : 0);
+}
+
+static unsigned int mbochs_get_available(struct mdev_type *mtype)
+{
+ struct mbochs_type *type =
+ container_of(mtype, struct mbochs_type, type);
+
+ return atomic_read(&mbochs_avail_mbytes) / type->mbytes;
+}
+
+static const struct vfio_device_ops mbochs_dev_ops = {
+ .close_device = mbochs_close_device,
+ .init = mbochs_init_dev,
+ .release = mbochs_release_dev,
+ .read = mbochs_read,
+ .write = mbochs_write,
+ .ioctl = mbochs_ioctl,
+ .mmap = mbochs_mmap,
+};
+
+static struct mdev_driver mbochs_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
+ .driver = {
+ .name = "mbochs",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mbochs_probe,
+ .remove = mbochs_remove,
+ .get_available = mbochs_get_available,
+ .show_description = mbochs_show_description,
+};
+
+static const struct file_operations vd_fops = {
+ .owner = THIS_MODULE,
+};
+
+static void mbochs_device_release(struct device *dev)
+{
+ /* nothing */
+}
+
+static int __init mbochs_dev_init(void)
+{
+ int ret = 0;
+
+ atomic_set(&mbochs_avail_mbytes, max_mbytes);
+
+ ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
+ if (ret < 0) {
+ pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
+ return ret;
+ }
+ cdev_init(&mbochs_cdev, &vd_fops);
+ cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
+ pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));
+
+ ret = mdev_register_driver(&mbochs_driver);
+ if (ret)
+ goto err_cdev;
+
+ mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
+ if (IS_ERR(mbochs_class)) {
+ pr_err("Error: failed to register mbochs_dev class\n");
+ ret = PTR_ERR(mbochs_class);
+ goto err_driver;
+ }
+ mbochs_dev.class = mbochs_class;
+ mbochs_dev.release = mbochs_device_release;
+ dev_set_name(&mbochs_dev, "%s", MBOCHS_NAME);
+
+ ret = device_register(&mbochs_dev);
+ if (ret)
+ goto err_class;
+
+ ret = mdev_register_parent(&mbochs_parent, &mbochs_dev, &mbochs_driver,
+ mbochs_mdev_types,
+ ARRAY_SIZE(mbochs_mdev_types));
+ if (ret)
+ goto err_device;
+
+ return 0;
+
+err_device:
+ device_unregister(&mbochs_dev);
+err_class:
+ class_destroy(mbochs_class);
+err_driver:
+ mdev_unregister_driver(&mbochs_driver);
+err_cdev:
+ cdev_del(&mbochs_cdev);
+ unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
+ return ret;
+}
+
+static void __exit mbochs_dev_exit(void)
+{
+ mbochs_dev.bus = NULL;
+ mdev_unregister_parent(&mbochs_parent);
+
+ device_unregister(&mbochs_dev);
+ mdev_unregister_driver(&mbochs_driver);
+ cdev_del(&mbochs_cdev);
+ unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
+ class_destroy(mbochs_class);
+ mbochs_class = NULL;
+}
+
+MODULE_IMPORT_NS(DMA_BUF);
+module_init(mbochs_dev_init)
+module_exit(mbochs_dev_exit)
diff --git a/samples/vfio-mdev/mdpy-defs.h b/samples/vfio-mdev/mdpy-defs.h
new file mode 100644
index 000000000..961c55ec3
--- /dev/null
+++ b/samples/vfio-mdev/mdpy-defs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Simple pci display device.
+ *
+ * Framebuffer memory is pci bar 0.
+ * Configuration (read-only) is in pci config space.
+ * Format field uses drm fourcc codes.
+ * At the moment only DRM_FORMAT_XRGB8888 is supported.
+ */
+
+/* pci ids */
+#define MDPY_PCI_VENDOR_ID PCI_VENDOR_ID_REDHAT
+#define MDPY_PCI_DEVICE_ID 0x000f
+#define MDPY_PCI_SUBVENDOR_ID PCI_SUBVENDOR_ID_REDHAT_QUMRANET
+#define MDPY_PCI_SUBDEVICE_ID PCI_SUBDEVICE_ID_QEMU
+
+/* pci cfg space offsets for fb config (dword) */
+#define MDPY_VENDORCAP_OFFSET 0x40
+#define MDPY_VENDORCAP_SIZE 0x10
+#define MDPY_FORMAT_OFFSET (MDPY_VENDORCAP_OFFSET + 0x04)
+#define MDPY_WIDTH_OFFSET (MDPY_VENDORCAP_OFFSET + 0x08)
+#define MDPY_HEIGHT_OFFSET (MDPY_VENDORCAP_OFFSET + 0x0c)
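+
+/*
+ * A guest driver discovers the framebuffer layout by reading these
+ * dwords from PCI config space, e.g. (sketch; see mdpy-fb.c for the
+ * real thing):
+ *
+ *	pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
+ *	pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET,  &width);
+ *	pci_read_config_dword(pdev, MDPY_HEIGHT_OFFSET, &height);
+ */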
diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
new file mode 100644
index 000000000..4eb7aa11c
--- /dev/null
+++ b/samples/vfio-mdev/mdpy-fb.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framebuffer driver for mdpy (mediated virtual pci display device).
+ *
+ * See mdpy-defs.h for device specs
+ *
+ * (c) Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * Using some code snippets from simplefb and cirrusfb.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <drm/drm_fourcc.h>
+#include "mdpy-defs.h"
+
+static const struct fb_fix_screeninfo mdpy_fb_fix = {
+ .id = "mdpy-fb",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo mdpy_fb_var = {
+ .height = -1,
+ .width = -1,
+ .activate = FB_ACTIVATE_NOW,
+ .vmode = FB_VMODE_NONINTERLACED,
+
+ .bits_per_pixel = 32,
+ .transp.offset = 24,
+ .red.offset = 16,
+ .green.offset = 8,
+ .blue.offset = 0,
+ .transp.length = 8,
+ .red.length = 8,
+ .green.length = 8,
+ .blue.length = 8,
+};
+
+#define PSEUDO_PALETTE_SIZE 16
+
+struct mdpy_fb_par {
+ u32 palette[PSEUDO_PALETTE_SIZE];
+};
+
+static int mdpy_fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ u32 *pal = info->pseudo_palette;
+ u32 cr = red >> (16 - info->var.red.length);
+ u32 cg = green >> (16 - info->var.green.length);
+ u32 cb = blue >> (16 - info->var.blue.length);
+ u32 value, mask;
+
+ if (regno >= PSEUDO_PALETTE_SIZE)
+ return -EINVAL;
+
+ value = (cr << info->var.red.offset) |
+ (cg << info->var.green.offset) |
+ (cb << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
+ pal[regno] = value;
+
+ return 0;
+}
+
+static void mdpy_fb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+}
+
+static const struct fb_ops mdpy_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = mdpy_fb_destroy,
+ .fb_setcolreg = mdpy_fb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int mdpy_fb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct fb_info *info;
+ struct mdpy_fb_par *par;
+ u32 format, width, height;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = pci_request_regions(pdev, "mdpy-fb");
+ if (ret < 0)
+ goto err_disable_dev;
+
+ pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
+ pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width);
+ pci_read_config_dword(pdev, MDPY_HEIGHT_OFFSET, &height);
+ if (format != DRM_FORMAT_XRGB8888) {
+ pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
+ format, DRM_FORMAT_XRGB8888);
+ ret = -EINVAL;
+ goto err_release_regions;
+ }
+ if (width < 100 || width > 10000) {
+ pci_err(pdev, "width (%d) out of range\n", width);
+ ret = -EINVAL;
+ goto err_release_regions;
+ }
+ if (height < 100 || height > 10000) {
+ pci_err(pdev, "height (%d) out of range\n", height);
+ ret = -EINVAL;
+ goto err_release_regions;
+ }
+ pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
+ width, height);
+
+ info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+ pci_set_drvdata(pdev, info);
+ par = info->par;
+
+ info->fix = mdpy_fb_fix;
+ info->fix.smem_start = pci_resource_start(pdev, 0);
+ info->fix.smem_len = pci_resource_len(pdev, 0);
+ info->fix.line_length = width * 4;
+
+ info->var = mdpy_fb_var;
+ info->var.xres = width;
+ info->var.yres = height;
+ info->var.xres_virtual = width;
+ info->var.yres_virtual = height;
+
+ info->screen_size = info->fix.smem_len;
+ info->screen_base = ioremap(info->fix.smem_start,
+ info->screen_size);
+ if (!info->screen_base) {
+ pci_err(pdev, "ioremap(pcibar) failed\n");
+ ret = -EIO;
+ goto err_release_fb;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+ info->apertures->ranges[0].base = info->fix.smem_start;
+ info->apertures->ranges[0].size = info->fix.smem_len;
+
+ info->fbops = &mdpy_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->pseudo_palette = par->palette;
+
+ ret = register_framebuffer(info);
+ if (ret < 0) {
+ pci_err(pdev, "mdpy-fb device register failed: %d\n", ret);
+ goto err_unmap;
+ }
+
+ pci_info(pdev, "fb%d registered\n", info->node);
+ return 0;
+
+err_unmap:
+ iounmap(info->screen_base);
+
+err_release_fb:
+ framebuffer_release(info);
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void mdpy_fb_remove(struct pci_dev *pdev)
+{
+ struct fb_info *info = pci_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_device_id mdpy_fb_pci_table[] = {
+ {
+ .vendor = MDPY_PCI_VENDOR_ID,
+ .device = MDPY_PCI_DEVICE_ID,
+ .subvendor = MDPY_PCI_SUBVENDOR_ID,
+ .subdevice = MDPY_PCI_SUBDEVICE_ID,
+ }, {
+ /* end of list */
+ }
+};
+
+static struct pci_driver mdpy_fb_pci_driver = {
+ .name = "mdpy-fb",
+ .id_table = mdpy_fb_pci_table,
+ .probe = mdpy_fb_probe,
+ .remove = mdpy_fb_remove,
+};
+
+static int __init mdpy_fb_init(void)
+{
+	return pci_register_driver(&mdpy_fb_pci_driver);
+}
+
+module_init(mdpy_fb_init);
+
+MODULE_DEVICE_TABLE(pci, mdpy_fb_pci_table);
+MODULE_LICENSE("GPL v2");
diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
new file mode 100644
index 000000000..946e8cfde
--- /dev/null
+++ b/samples/vfio-mdev/mdpy.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mediated virtual PCI display host device driver
+ *
+ * See mdpy-defs.h for device specs
+ *
+ * (c) Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * based on mtty driver which is:
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/vfio.h>
+#include <linux/iommu.h>
+#include <linux/sysfs.h>
+#include <linux/mdev.h>
+#include <linux/pci.h>
+#include <drm/drm_fourcc.h>
+#include "mdpy-defs.h"
+
+#define MDPY_NAME "mdpy"
+#define MDPY_CLASS_NAME "mdpy"
+
+#define MDPY_CONFIG_SPACE_SIZE 0xff
+#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
+#define MDPY_DISPLAY_REGION 16
+
+#define STORE_LE16(addr, val) (*(u16 *)addr = val)
+#define STORE_LE32(addr, val) (*(u32 *)addr = val)
+
+
+MODULE_LICENSE("GPL v2");
+
+#define MDPY_TYPE_1 "vga"
+#define MDPY_TYPE_2 "xga"
+#define MDPY_TYPE_3 "hd"
+
+static struct mdpy_type {
+ struct mdev_type type;
+ u32 format;
+ u32 bytepp;
+ u32 width;
+ u32 height;
+} mdpy_types[] = {
+ {
+ .type.sysfs_name = MDPY_TYPE_1,
+ .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
+ .format = DRM_FORMAT_XRGB8888,
+ .bytepp = 4,
+ .width = 640,
+ .height = 480,
+ }, {
+ .type.sysfs_name = MDPY_TYPE_2,
+ .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
+ .format = DRM_FORMAT_XRGB8888,
+ .bytepp = 4,
+ .width = 1024,
+ .height = 768,
+ }, {
+ .type.sysfs_name = MDPY_TYPE_3,
+ .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
+ .format = DRM_FORMAT_XRGB8888,
+ .bytepp = 4,
+ .width = 1920,
+ .height = 1080,
+ },
+};
+
+static struct mdev_type *mdpy_mdev_types[] = {
+ &mdpy_types[0].type,
+ &mdpy_types[1].type,
+ &mdpy_types[2].type,
+};
+
+static dev_t mdpy_devt;
+static struct class *mdpy_class;
+static struct cdev mdpy_cdev;
+static struct device mdpy_dev;
+static struct mdev_parent mdpy_parent;
+static const struct vfio_device_ops mdpy_dev_ops;
+
+/* State of each mdev device */
+struct mdev_state {
+ struct vfio_device vdev;
+ u8 *vconfig;
+ u32 bar_mask;
+ struct mutex ops_lock;
+ struct mdev_device *mdev;
+ struct vfio_device_info dev_info;
+
+ const struct mdpy_type *type;
+ u32 memsize;
+ void *memblk;
+};
+
+static void mdpy_create_config_space(struct mdev_state *mdev_state)
+{
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
+ MDPY_PCI_VENDOR_ID);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
+ MDPY_PCI_DEVICE_ID);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
+ MDPY_PCI_SUBVENDOR_ID);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
+ MDPY_PCI_SUBDEVICE_ID);
+
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
+ PCI_STATUS_CAP_LIST);
+ STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
+ PCI_CLASS_DISPLAY_OTHER);
+ mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
+
+ STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_32 |
+ PCI_BASE_ADDRESS_MEM_PREFETCH);
+ mdev_state->bar_mask = ~(mdev_state->memsize) + 1;
+
+ /* vendor specific capability for the config registers */
+ mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
+ mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
+ mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
+ mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
+ STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
+ mdev_state->type->format);
+ STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
+ mdev_state->type->width);
+ STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
+ mdev_state->type->height);
+}
+
+static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
+ char *buf, u32 count)
+{
+ struct device *dev = mdev_dev(mdev_state->mdev);
+ u32 cfg_addr;
+
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ cfg_addr = *(u32 *)buf;
+
+ if (cfg_addr == 0xffffffff) {
+ cfg_addr = (cfg_addr & mdev_state->bar_mask);
+ } else {
+ cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+ if (cfg_addr)
+ dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
+ }
+
+ cfg_addr |= (mdev_state->vconfig[offset] &
+ ~PCI_BASE_ADDRESS_MEM_MASK);
+ STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
+ break;
+ }
+}
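+
+/*
+ * Note on the sizing handshake above: bar_mask is the two's complement
+ * of the BAR size (~memsize + 1).  Assuming, for illustration, a 16 MB
+ * framebuffer (memsize = 0x1000000), bar_mask is 0xff000000; a guest
+ * probing the BAR writes all-ones, reads back 0xff000000 plus the low
+ * flag bits, and recovers the size as
+ * ~(readback & PCI_BASE_ADDRESS_MEM_MASK) + 1 = 0x1000000.
+ */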
+
+static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
+ size_t count, loff_t pos, bool is_write)
+{
+ int ret = 0;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ if (pos < MDPY_CONFIG_SPACE_SIZE) {
+ if (is_write)
+ handle_pci_cfg_write(mdev_state, pos, buf, count);
+ else
+ memcpy(buf, (mdev_state->vconfig + pos), count);
+
+ } else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
+ (pos + count <=
+ MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
+ pos -= MDPY_MEMORY_BAR_OFFSET;
+ if (is_write)
+ memcpy(mdev_state->memblk, buf, count);
+ else
+ memcpy(buf, mdev_state->memblk, count);
+
+ } else {
+ dev_info(mdev_state->vdev.dev,
+ "%s: %s @0x%llx (unhandled)\n", __func__,
+ is_write ? "WR" : "RD", pos);
+ ret = -1;
+ goto accessfailed;
+ }
+
+ ret = count;
+
+accessfailed:
+ mutex_unlock(&mdev_state->ops_lock);
+
+ return ret;
+}
+
+static int mdpy_reset(struct mdev_state *mdev_state)
+{
+ u32 stride, i;
+
+ /* initialize with gray gradient */
+ stride = mdev_state->type->width * mdev_state->type->bytepp;
+ for (i = 0; i < mdev_state->type->height; i++)
+ memset(mdev_state->memblk + i * stride,
+ i * 255 / mdev_state->type->height,
+ stride);
+ return 0;
+}
+
+static int mdpy_init_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ struct mdev_device *mdev = to_mdev_device(vdev->dev);
+ const struct mdpy_type *type =
+ container_of(mdev->type, struct mdpy_type, type);
+ u32 fbsize;
+ int ret = -ENOMEM;
+
+ mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
+ if (!mdev_state->vconfig)
+ return ret;
+
+ fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);
+
+ mdev_state->memblk = vmalloc_user(fbsize);
+ if (!mdev_state->memblk)
+ goto out_vconfig;
+
+ mutex_init(&mdev_state->ops_lock);
+ mdev_state->mdev = mdev;
+ mdev_state->type = type;
+ mdev_state->memsize = fbsize;
+ mdpy_create_config_space(mdev_state);
+ mdpy_reset(mdev_state);
+
+ dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
+ type->width, type->height);
+ return 0;
+
+out_vconfig:
+ kfree(mdev_state->vconfig);
+ return ret;
+}
+
+static int mdpy_probe(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state;
+ int ret;
+
+ mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
+ &mdpy_dev_ops);
+ if (IS_ERR(mdev_state))
+ return PTR_ERR(mdev_state);
+
+ ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
+ if (ret)
+ goto err_put_vdev;
+ dev_set_drvdata(&mdev->dev, mdev_state);
+ return 0;
+
+err_put_vdev:
+ vfio_put_device(&mdev_state->vdev);
+ return ret;
+}
+
+static void mdpy_release_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ vfree(mdev_state->memblk);
+ kfree(mdev_state->vconfig);
+ vfio_free_device(vdev);
+}
+
+static void mdpy_remove(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
+
+ dev_info(&mdev->dev, "%s\n", __func__);
+
+ vfio_unregister_group_dev(&mdev_state->vdev);
+ vfio_put_device(&mdev_state->vdev);
+}
+
+static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
+ return -EINVAL;
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if (vma->vm_end - vma->vm_start > mdev_state->memsize)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ return remap_vmalloc_range(vma, mdev_state->memblk, 0);
+}
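+
+/*
+ * Userspace reaches the handler above by mmap()ing the device fd at the
+ * offset reported for the display region.  A sketch, with error handling
+ * omitted and device_fd assumed to be an open VFIO device fd:
+ *
+ *	struct vfio_region_info info = {
+ *		.argsz = sizeof(info),
+ *		.index = VFIO_PCI_BAR0_REGION_INDEX,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
+ *	void *fb = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
+ *			MAP_SHARED, device_fd, info.offset);
+ */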
+
+static int mdpy_get_region_info(struct mdev_state *mdev_state,
+ struct vfio_region_info *region_info,
+ u16 *cap_type_id, void **cap_type)
+{
+ if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
+ region_info->index != MDPY_DISPLAY_REGION)
+ return -EINVAL;
+
+ switch (region_info->index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ region_info->offset = 0;
+ region_info->size = MDPY_CONFIG_SPACE_SIZE;
+ region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ case MDPY_DISPLAY_REGION:
+ region_info->offset = MDPY_MEMORY_BAR_OFFSET;
+ region_info->size = mdev_state->memsize;
+ region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP);
+ break;
+ default:
+ region_info->size = 0;
+ region_info->offset = 0;
+ region_info->flags = 0;
+ }
+
+ return 0;
+}
+
+static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
+{
+ irq_info->count = 0;
+ return 0;
+}
+
+static int mdpy_get_device_info(struct vfio_device_info *dev_info)
+{
+ dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
+ dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
+ dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
+ return 0;
+}
+
+static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
+ struct vfio_device_gfx_plane_info *plane)
+{
+ if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
+ if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
+ VFIO_GFX_PLANE_TYPE_REGION))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
+ return -EINVAL;
+
+ plane->drm_format = mdev_state->type->format;
+ plane->width = mdev_state->type->width;
+ plane->height = mdev_state->type->height;
+ plane->stride = (mdev_state->type->width *
+ mdev_state->type->bytepp);
+ plane->size = mdev_state->memsize;
+ plane->region_index = MDPY_DISPLAY_REGION;
+
+ /* unused */
+ plane->drm_format_mod = 0;
+ plane->x_pos = 0;
+ plane->y_pos = 0;
+ plane->x_hot = 0;
+ plane->y_hot = 0;
+
+ return 0;
+}
+
+static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ unsigned long minsz;
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = mdpy_get_device_info(&info);
+ if (ret)
+ return ret;
+
+ memcpy(&mdev_state->dev_info, &info, sizeof(info));
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+ case VFIO_DEVICE_GET_REGION_INFO:
+ {
+ struct vfio_region_info info;
+ u16 cap_type_id = 0;
+ void *cap_type = NULL;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
+ &cap_type);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if ((info.argsz < minsz) ||
+ (info.index >= mdev_state->dev_info.num_irqs))
+ return -EINVAL;
+
+ ret = mdpy_get_irq_info(&info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_QUERY_GFX_PLANE:
+ {
+ struct vfio_device_gfx_plane_info plane;
+
+ minsz = offsetofend(struct vfio_device_gfx_plane_info,
+ region_index);
+
+ if (copy_from_user(&plane, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (plane.argsz < minsz)
+ return -EINVAL;
+
+ ret = mdpy_query_gfx_plane(mdev_state, &plane);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &plane, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_SET_IRQS:
+ return -EINVAL;
+
+ case VFIO_DEVICE_RESET:
+ return mdpy_reset(mdev_state);
+ }
+ return -ENOTTY;
+}
+
+static ssize_t
+resolution_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_state *mdev_state = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%dx%d\n",
+ mdev_state->type->width,
+ mdev_state->type->height);
+}
+static DEVICE_ATTR_RO(resolution);
+
+static struct attribute *mdev_dev_attrs[] = {
+ &dev_attr_resolution.attr,
+ NULL,
+};
+
+static const struct attribute_group mdev_dev_group = {
+ .name = "vendor",
+ .attrs = mdev_dev_attrs,
+};
+
+static const struct attribute_group *mdev_dev_groups[] = {
+ &mdev_dev_group,
+ NULL,
+};
+
+static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
+{
+ struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);
+
+ return sprintf(buf, "virtual display, %dx%d framebuffer\n",
+ type->width, type->height);
+}
+
+static const struct vfio_device_ops mdpy_dev_ops = {
+ .init = mdpy_init_dev,
+ .release = mdpy_release_dev,
+ .read = mdpy_read,
+ .write = mdpy_write,
+ .ioctl = mdpy_ioctl,
+ .mmap = mdpy_mmap,
+};
+
+static struct mdev_driver mdpy_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
+ .max_instances = 4,
+ .driver = {
+ .name = "mdpy",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mdpy_probe,
+ .remove = mdpy_remove,
+ .show_description = mdpy_show_description,
+};
+
+static const struct file_operations vd_fops = {
+ .owner = THIS_MODULE,
+};
+
+static void mdpy_device_release(struct device *dev)
+{
+ /* nothing */
+}
+
+static int __init mdpy_dev_init(void)
+{
+ int ret = 0;
+
+ ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
+ if (ret < 0) {
+ pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
+ return ret;
+ }
+ cdev_init(&mdpy_cdev, &vd_fops);
+ cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
+ pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));
+
+ ret = mdev_register_driver(&mdpy_driver);
+ if (ret)
+ goto err_cdev;
+
+ mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
+ if (IS_ERR(mdpy_class)) {
+ pr_err("Error: failed to register mdpy_dev class\n");
+ ret = PTR_ERR(mdpy_class);
+ goto err_driver;
+ }
+ mdpy_dev.class = mdpy_class;
+ mdpy_dev.release = mdpy_device_release;
+ dev_set_name(&mdpy_dev, "%s", MDPY_NAME);
+
+ ret = device_register(&mdpy_dev);
+ if (ret)
+ goto err_class;
+
+ ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
+ mdpy_mdev_types,
+ ARRAY_SIZE(mdpy_mdev_types));
+ if (ret)
+ goto err_device;
+
+ return 0;
+
+err_device:
+ device_unregister(&mdpy_dev);
+err_class:
+ class_destroy(mdpy_class);
+err_driver:
+ mdev_unregister_driver(&mdpy_driver);
+err_cdev:
+ cdev_del(&mdpy_cdev);
+ unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
+ return ret;
+}
+
+static void __exit mdpy_dev_exit(void)
+{
+ mdpy_dev.bus = NULL;
+ mdev_unregister_parent(&mdpy_parent);
+
+ device_unregister(&mdpy_dev);
+ mdev_unregister_driver(&mdpy_driver);
+ cdev_del(&mdpy_cdev);
+ unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
+ class_destroy(mdpy_class);
+ mdpy_class = NULL;
+}
+
+module_param_named(count, mdpy_driver.max_instances, int, 0444);
+MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");
+
+module_init(mdpy_dev_init)
+module_exit(mdpy_dev_exit)
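
For context, a display client consumes the device above by querying the plane and then mapping the region it names. A minimal sketch against the VFIO uAPI (device_fd is assumed to be an open VFIO device fd; error handling omitted):

    struct vfio_device_gfx_plane_info plane = {
            .argsz = sizeof(plane),
            .flags = VFIO_GFX_PLANE_TYPE_REGION,
    };

    ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
    /* plane.region_index is MDPY_DISPLAY_REGION (16); plane.drm_format,
       plane.width, plane.height and plane.stride describe the framebuffer. */
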
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
new file mode 100644
index 000000000..e72085fc1
--- /dev/null
+++ b/samples/vfio-mdev/mtty.c
@@ -0,0 +1,1376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mediated virtual PCI serial host device driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Author: Neo Jia <cjia@nvidia.com>
+ * Kirti Wankhede <kwankhede@nvidia.com>
+ *
+ * Sample driver that creates an mdev device that simulates a serial port
+ * over a PCI card.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/vfio.h>
+#include <linux/iommu.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/mdev.h>
+#include <linux/pci.h>
+#include <linux/serial.h>
+#include <uapi/linux/serial_reg.h>
+#include <linux/eventfd.h>
+/*
+ * #defines
+ */
+
+#define VERSION_STRING "0.1"
+#define DRIVER_AUTHOR "NVIDIA Corporation"
+
+#define MTTY_CLASS_NAME "mtty"
+
+#define MTTY_NAME "mtty"
+
+#define MTTY_STRING_LEN 16
+
+#define MTTY_CONFIG_SPACE_SIZE 0xff
+#define MTTY_IO_BAR_SIZE 0x8
+#define MTTY_MMIO_BAR_SIZE 0x100000
+
+#define STORE_LE16(addr, val) (*(u16 *)addr = val)
+#define STORE_LE32(addr, val) (*(u32 *)addr = val)
+
+#define MAX_FIFO_SIZE 16
+
+#define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
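+/* the AND-based wrap above works because MAX_FIFO_SIZE is a power of two */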
+
+#define MTTY_VFIO_PCI_OFFSET_SHIFT 40
+
+#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
+#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
+ ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
+#define MTTY_VFIO_PCI_OFFSET_MASK \
+ (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
+#define MAX_MTTYS 24
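+
+/*
+ * Example: VFIO_PCI_CONFIG_REGION_INDEX is 7, so config-space accesses
+ * arrive at file offset 7 << 40 = 0x70000000000, while BAR0 (index 0)
+ * starts at offset 0.  mdev_access() below uses the offset macros above
+ * to split a file position back into (region index, offset).
+ */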
+
+/*
+ * Global Structures
+ */
+
+static struct mtty_dev {
+ dev_t vd_devt;
+ struct class *vd_class;
+ struct cdev vd_cdev;
+ struct idr vd_idr;
+ struct device dev;
+ struct mdev_parent parent;
+} mtty_dev;
+
+struct mdev_region_info {
+ u64 start;
+ u64 phys_start;
+ u32 size;
+ u64 vfio_offset;
+};
+
+#if defined(DEBUG_REGS)
+static const char *wr_reg[] = {
+ "TX",
+ "IER",
+ "FCR",
+ "LCR",
+ "MCR",
+ "LSR",
+ "MSR",
+ "SCR"
+};
+
+static const char *rd_reg[] = {
+ "RX",
+ "IER",
+ "IIR",
+ "LCR",
+ "MCR",
+ "LSR",
+ "MSR",
+ "SCR"
+};
+#endif
+
+/* loop back buffer */
+struct rxtx {
+ u8 fifo[MAX_FIFO_SIZE];
+ u8 head, tail;
+ u8 count;
+};
+
+struct serial_port {
+ u8 uart_reg[8]; /* 8 registers */
+ struct rxtx rxtx; /* loop back buffer */
+ bool dlab;
+ bool overrun;
+ u16 divisor;
+ u8 fcr; /* FIFO control register */
+ u8 max_fifo_size;
+ u8 intr_trigger_level; /* interrupt trigger level */
+};
+
+/* State of each mdev device */
+struct mdev_state {
+ struct vfio_device vdev;
+ int irq_fd;
+ struct eventfd_ctx *intx_evtfd;
+ struct eventfd_ctx *msi_evtfd;
+ int irq_index;
+ u8 *vconfig;
+ struct mutex ops_lock;
+ struct mdev_device *mdev;
+ struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
+ u32 bar_mask[VFIO_PCI_NUM_REGIONS];
+ struct list_head next;
+ struct serial_port s[2];
+ struct mutex rxtx_lock;
+ struct vfio_device_info dev_info;
+ int nr_ports;
+};
+
+static struct mtty_type {
+ struct mdev_type type;
+ int nr_ports;
+} mtty_types[2] = {
+ { .nr_ports = 1, .type.sysfs_name = "1",
+ .type.pretty_name = "Single port serial" },
+ { .nr_ports = 2, .type.sysfs_name = "2",
+ .type.pretty_name = "Dual port serial" },
+};
+
+static struct mdev_type *mtty_mdev_types[] = {
+ &mtty_types[0].type,
+ &mtty_types[1].type,
+};
+
+static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);
+
+static const struct file_operations vd_fops = {
+ .owner = THIS_MODULE,
+};
+
+static const struct vfio_device_ops mtty_dev_ops;
+
+/* function prototypes */
+
+static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
+
+/* Helper functions */
+
+static void dump_buffer(u8 *buf, uint32_t count)
+{
+#if defined(DEBUG)
+ int i;
+
+ pr_info("Buffer:\n");
+ for (i = 0; i < count; i++) {
+ pr_info("%2x ", *(buf + i));
+ if ((i + 1) % 16 == 0)
+ pr_info("\n");
+ }
+#endif
+}
+
+static void mtty_create_config_space(struct mdev_state *mdev_state)
+{
+ /* PCI dev ID */
+ STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
+
+ /* Control: I/O+, Mem-, BusMaster- */
+ STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
+
+ /* Status: capabilities list absent */
+ STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
+
+ /* Rev ID */
+ mdev_state->vconfig[0x8] = 0x10;
+
+ /* programming interface class : 16550-compatible serial controller */
+ mdev_state->vconfig[0x9] = 0x02;
+
+ /* Sub class : 00 */
+ mdev_state->vconfig[0xa] = 0x00;
+
+ /* Base class : Simple Communication controllers */
+ mdev_state->vconfig[0xb] = 0x07;
+
+ /* base address registers */
+ /* BAR0: IO space */
+ STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
+ mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
+
+ if (mdev_state->nr_ports == 2) {
+ /* BAR1: IO space */
+ STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
+ mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
+ }
+
+ /* Subsystem ID */
+ STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
+
+ mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */
+ mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */
+
+ /* Vendor specific data */
+ mdev_state->vconfig[0x40] = 0x23;
+ mdev_state->vconfig[0x43] = 0x80;
+ mdev_state->vconfig[0x44] = 0x23;
+ mdev_state->vconfig[0x48] = 0x23;
+ mdev_state->vconfig[0x4c] = 0x23;
+
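+	/* 0x60-0x6e below spell the ASCII string "PCI Serial/UART" */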
+ mdev_state->vconfig[0x60] = 0x50;
+ mdev_state->vconfig[0x61] = 0x43;
+ mdev_state->vconfig[0x62] = 0x49;
+ mdev_state->vconfig[0x63] = 0x20;
+ mdev_state->vconfig[0x64] = 0x53;
+ mdev_state->vconfig[0x65] = 0x65;
+ mdev_state->vconfig[0x66] = 0x72;
+ mdev_state->vconfig[0x67] = 0x69;
+ mdev_state->vconfig[0x68] = 0x61;
+ mdev_state->vconfig[0x69] = 0x6c;
+ mdev_state->vconfig[0x6a] = 0x2f;
+ mdev_state->vconfig[0x6b] = 0x55;
+ mdev_state->vconfig[0x6c] = 0x41;
+ mdev_state->vconfig[0x6d] = 0x52;
+ mdev_state->vconfig[0x6e] = 0x54;
+}
+
+static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
+ u8 *buf, u32 count)
+{
+ u32 cfg_addr, bar_mask, bar_index = 0;
+
+ switch (offset) {
+ case 0x04: /* device control */
+ case 0x06: /* device status */
+ /* do nothing */
+ break;
+ case 0x3c: /* interrupt line */
+ mdev_state->vconfig[0x3c] = buf[0];
+ break;
+ case 0x3d:
+ /*
+ * Interrupt Pin is hardwired to INTA.
+		 * This field is write-protected by hardware.
+ */
+ break;
+ case 0x10: /* BAR0 */
+ case 0x14: /* BAR1 */
+ if (offset == 0x10)
+ bar_index = 0;
+ else if (offset == 0x14)
+ bar_index = 1;
+
+ if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
+ STORE_LE32(&mdev_state->vconfig[offset], 0);
+ break;
+ }
+
+ cfg_addr = *(u32 *)buf;
+ pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
+
+ if (cfg_addr == 0xffffffff) {
+ bar_mask = mdev_state->bar_mask[bar_index];
+ cfg_addr = (cfg_addr & bar_mask);
+ }
+
+ cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
+ STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
+ break;
+ case 0x18: /* BAR2 */
+ case 0x1c: /* BAR3 */
+ case 0x20: /* BAR4 */
+ STORE_LE32(&mdev_state->vconfig[offset], 0);
+ break;
+ default:
+ pr_info("PCI config write @0x%x of %d bytes not handled\n",
+ offset, count);
+ break;
+ }
+}
+
+static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
+ u16 offset, u8 *buf, u32 count)
+{
+ u8 data = *buf;
+
+ /* Handle data written by guest */
+ switch (offset) {
+ case UART_TX:
+ /* if DLAB set, data is LSB of divisor */
+ if (mdev_state->s[index].dlab) {
+ mdev_state->s[index].divisor |= data;
+ break;
+ }
+
+ mutex_lock(&mdev_state->rxtx_lock);
+
+ /* save in TX buffer */
+ if (mdev_state->s[index].rxtx.count <
+ mdev_state->s[index].max_fifo_size) {
+ mdev_state->s[index].rxtx.fifo[
+ mdev_state->s[index].rxtx.head] = data;
+ mdev_state->s[index].rxtx.count++;
+ CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
+ mdev_state->s[index].overrun = false;
+
+ /*
+			 * Trigger interrupt if the receive-data interrupt is
+			 * enabled and the FIFO has reached the trigger level
+ */
+ if ((mdev_state->s[index].uart_reg[UART_IER] &
+ UART_IER_RDI) &&
+ (mdev_state->s[index].rxtx.count ==
+ mdev_state->s[index].intr_trigger_level)) {
+ /* trigger interrupt */
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: Fifo level trigger\n",
+ index);
+#endif
+ mtty_trigger_interrupt(mdev_state);
+ }
+ } else {
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: Buffer Overflow\n", index);
+#endif
+ mdev_state->s[index].overrun = true;
+
+ /*
+ * Trigger interrupt if receiver line status interrupt
+ * is enabled
+ */
+ if (mdev_state->s[index].uart_reg[UART_IER] &
+ UART_IER_RLSI)
+ mtty_trigger_interrupt(mdev_state);
+ }
+ mutex_unlock(&mdev_state->rxtx_lock);
+ break;
+
+ case UART_IER:
+ /* if DLAB set, data is MSB of divisor */
+ if (mdev_state->s[index].dlab)
+ mdev_state->s[index].divisor |= (u16)data << 8;
+ else {
+ mdev_state->s[index].uart_reg[offset] = data;
+ mutex_lock(&mdev_state->rxtx_lock);
+ if ((data & UART_IER_THRI) &&
+ (mdev_state->s[index].rxtx.head ==
+ mdev_state->s[index].rxtx.tail)) {
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: IER_THRI write\n",
+ index);
+#endif
+ mtty_trigger_interrupt(mdev_state);
+ }
+
+ mutex_unlock(&mdev_state->rxtx_lock);
+ }
+
+ break;
+
+ case UART_FCR:
+ mdev_state->s[index].fcr = data;
+
+ mutex_lock(&mdev_state->rxtx_lock);
+ if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
+ /* clear loop back FIFO */
+ mdev_state->s[index].rxtx.count = 0;
+ mdev_state->s[index].rxtx.head = 0;
+ mdev_state->s[index].rxtx.tail = 0;
+ }
+ mutex_unlock(&mdev_state->rxtx_lock);
+
+ switch (data & UART_FCR_TRIGGER_MASK) {
+ case UART_FCR_TRIGGER_1:
+ mdev_state->s[index].intr_trigger_level = 1;
+ break;
+
+ case UART_FCR_TRIGGER_4:
+ mdev_state->s[index].intr_trigger_level = 4;
+ break;
+
+ case UART_FCR_TRIGGER_8:
+ mdev_state->s[index].intr_trigger_level = 8;
+ break;
+
+ case UART_FCR_TRIGGER_14:
+ mdev_state->s[index].intr_trigger_level = 14;
+ break;
+ }
+
+	/*
+	 * Otherwise set the trigger level to 1, or implement a timer with a
+	 * timeout of 4 characters that sets the Receive Data Timeout in the
+	 * IIR register when it expires.
+	 */
+ mdev_state->s[index].intr_trigger_level = 1;
+ if (data & UART_FCR_ENABLE_FIFO)
+ mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
+ else {
+ mdev_state->s[index].max_fifo_size = 1;
+ mdev_state->s[index].intr_trigger_level = 1;
+ }
+
+ break;
+
+ case UART_LCR:
+ if (data & UART_LCR_DLAB) {
+ mdev_state->s[index].dlab = true;
+ mdev_state->s[index].divisor = 0;
+ } else
+ mdev_state->s[index].dlab = false;
+
+ mdev_state->s[index].uart_reg[offset] = data;
+ break;
+
+ case UART_MCR:
+ mdev_state->s[index].uart_reg[offset] = data;
+
+ if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
+ (data & UART_MCR_OUT2)) {
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: MCR_OUT2 write\n", index);
+#endif
+ mtty_trigger_interrupt(mdev_state);
+ }
+
+ if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
+ (data & (UART_MCR_RTS | UART_MCR_DTR))) {
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: MCR RTS/DTR write\n", index);
+#endif
+ mtty_trigger_interrupt(mdev_state);
+ }
+ break;
+
+ case UART_LSR:
+ case UART_MSR:
+ /* do nothing */
+ break;
+
+ case UART_SCR:
+ mdev_state->s[index].uart_reg[offset] = data;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
+ u16 offset, u8 *buf, u32 count)
+{
+ /* Handle read requests by guest */
+ switch (offset) {
+ case UART_RX:
+ /* if DLAB set, data is LSB of divisor */
+ if (mdev_state->s[index].dlab) {
+ *buf = (u8)mdev_state->s[index].divisor;
+ break;
+ }
+
+ mutex_lock(&mdev_state->rxtx_lock);
+ /* return data in tx buffer */
+ if (mdev_state->s[index].rxtx.head !=
+ mdev_state->s[index].rxtx.tail) {
+ *buf = mdev_state->s[index].rxtx.fifo[
+ mdev_state->s[index].rxtx.tail];
+ mdev_state->s[index].rxtx.count--;
+ CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
+ }
+
+ if (mdev_state->s[index].rxtx.head ==
+ mdev_state->s[index].rxtx.tail) {
+ /*
+ * Trigger interrupt if tx buffer empty interrupt is
+ * enabled and fifo is empty
+ */
+#if defined(DEBUG_INTR)
+ pr_err("Serial port %d: Buffer Empty\n", index);
+#endif
+ if (mdev_state->s[index].uart_reg[UART_IER] &
+ UART_IER_THRI)
+ mtty_trigger_interrupt(mdev_state);
+ }
+ mutex_unlock(&mdev_state->rxtx_lock);
+
+ break;
+
+ case UART_IER:
+ if (mdev_state->s[index].dlab) {
+ *buf = (u8)(mdev_state->s[index].divisor >> 8);
+ break;
+ }
+ *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
+ break;
+
+ case UART_IIR:
+ {
+ u8 ier = mdev_state->s[index].uart_reg[UART_IER];
+ *buf = 0;
+
+ mutex_lock(&mdev_state->rxtx_lock);
+ /* Interrupt priority 1: Parity, overrun, framing or break */
+ if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
+ *buf |= UART_IIR_RLSI;
+
+ /* Interrupt priority 2: Fifo trigger level reached */
+ if ((ier & UART_IER_RDI) &&
+ (mdev_state->s[index].rxtx.count >=
+ mdev_state->s[index].intr_trigger_level))
+ *buf |= UART_IIR_RDI;
+
+		/* Interrupt priority 3: transmitter holding register empty */
+ if ((ier & UART_IER_THRI) &&
+ (mdev_state->s[index].rxtx.head ==
+ mdev_state->s[index].rxtx.tail))
+ *buf |= UART_IIR_THRI;
+
+		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
+ if ((ier & UART_IER_MSI) &&
+ (mdev_state->s[index].uart_reg[UART_MCR] &
+ (UART_MCR_RTS | UART_MCR_DTR)))
+ *buf |= UART_IIR_MSI;
+
+ /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
+ if (*buf == 0)
+ *buf = UART_IIR_NO_INT;
+
+		/* set bits 6 & 7 to be 16550 compatible */
+ *buf |= 0xC0;
+ mutex_unlock(&mdev_state->rxtx_lock);
+ }
+ break;
+
+ case UART_LCR:
+ case UART_MCR:
+ *buf = mdev_state->s[index].uart_reg[offset];
+ break;
+
+ case UART_LSR:
+ {
+ u8 lsr = 0;
+
+ mutex_lock(&mdev_state->rxtx_lock);
+		/* at least one char in the FIFO */
+ if (mdev_state->s[index].rxtx.head !=
+ mdev_state->s[index].rxtx.tail)
+ lsr |= UART_LSR_DR;
+
+ /* if FIFO overrun */
+ if (mdev_state->s[index].overrun)
+ lsr |= UART_LSR_OE;
+
+		/* transmit FIFO empty and transmitter empty */
+ if (mdev_state->s[index].rxtx.head ==
+ mdev_state->s[index].rxtx.tail)
+ lsr |= UART_LSR_TEMT | UART_LSR_THRE;
+
+ mutex_unlock(&mdev_state->rxtx_lock);
+ *buf = lsr;
+ break;
+ }
+ case UART_MSR:
+ *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
+
+ mutex_lock(&mdev_state->rxtx_lock);
+	/* if AFE is 1 and the FIFO has space, set the CTS bit */
+ if (mdev_state->s[index].uart_reg[UART_MCR] &
+ UART_MCR_AFE) {
+ if (mdev_state->s[index].rxtx.count <
+ mdev_state->s[index].max_fifo_size)
+ *buf |= UART_MSR_CTS | UART_MSR_DCTS;
+ } else
+ *buf |= UART_MSR_CTS | UART_MSR_DCTS;
+ mutex_unlock(&mdev_state->rxtx_lock);
+
+ break;
+
+ case UART_SCR:
+ *buf = mdev_state->s[index].uart_reg[offset];
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mdev_read_base(struct mdev_state *mdev_state)
+{
+ int index, pos;
+ u32 start_lo, start_hi;
+ u32 mem_type;
+
+ pos = PCI_BASE_ADDRESS_0;
+
+ for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
+
+ if (!mdev_state->region_info[index].size)
+ continue;
+
+ start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+ mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
+ pos += 4;
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ default:
+			/* unknown mem type treated as 32-bit BAR */
+ start_hi = 0;
+ break;
+ }
+ pos += 4;
+ mdev_state->region_info[index].start = ((u64)start_hi << 32) |
+ start_lo;
+ }
+}
+
+static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
+ loff_t pos, bool is_write)
+{
+ unsigned int index;
+ loff_t offset;
+ int ret = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
+ offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+
+#if defined(DEBUG)
+ pr_info("%s: PCI config space %s at offset 0x%llx\n",
+ __func__, is_write ? "write" : "read", offset);
+#endif
+ if (is_write) {
+ dump_buffer(buf, count);
+ handle_pci_cfg_write(mdev_state, offset, buf, count);
+ } else {
+ memcpy(buf, (mdev_state->vconfig + offset), count);
+ dump_buffer(buf, count);
+ }
+
+ break;
+
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ if (!mdev_state->region_info[index].start)
+ mdev_read_base(mdev_state);
+
+ if (is_write) {
+ dump_buffer(buf, count);
+
+#if defined(DEBUG_REGS)
+ pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
+ __func__, index, offset, wr_reg[offset],
+ *buf, mdev_state->s[index].dlab);
+#endif
+ handle_bar_write(index, mdev_state, offset, buf, count);
+ } else {
+ handle_bar_read(index, mdev_state, offset, buf, count);
+ dump_buffer(buf, count);
+
+#if defined(DEBUG_REGS)
+ pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
+ __func__, index, offset, rd_reg[offset],
+ *buf, mdev_state->s[index].dlab);
+#endif
+ }
+ break;
+
+ default:
+ ret = -1;
+ goto accessfailed;
+ }
+
+ ret = count;
+
+accessfailed:
+ mutex_unlock(&mdev_state->ops_lock);
+
+ return ret;
+}
+
+static int mtty_init_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ struct mdev_device *mdev = to_mdev_device(vdev->dev);
+ struct mtty_type *type =
+ container_of(mdev->type, struct mtty_type, type);
+ int avail_ports = atomic_read(&mdev_avail_ports);
+ int ret;
+
+ do {
+ if (avail_ports < type->nr_ports)
+ return -ENOSPC;
+ } while (!atomic_try_cmpxchg(&mdev_avail_ports,
+ &avail_ports,
+ avail_ports - type->nr_ports));
+
+ mdev_state->nr_ports = type->nr_ports;
+ mdev_state->irq_index = -1;
+ mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
+ mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
+ mutex_init(&mdev_state->rxtx_lock);
+
+ mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
+ if (!mdev_state->vconfig) {
+ ret = -ENOMEM;
+ goto err_nr_ports;
+ }
+
+ mutex_init(&mdev_state->ops_lock);
+ mdev_state->mdev = mdev;
+ mtty_create_config_space(mdev_state);
+ return 0;
+
+err_nr_ports:
+ atomic_add(type->nr_ports, &mdev_avail_ports);
+ return ret;
+}
+
+static int mtty_probe(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state;
+ int ret;
+
+ mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
+ &mtty_dev_ops);
+ if (IS_ERR(mdev_state))
+ return PTR_ERR(mdev_state);
+
+ ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
+ if (ret)
+ goto err_put_vdev;
+ dev_set_drvdata(&mdev->dev, mdev_state);
+ return 0;
+
+err_put_vdev:
+ vfio_put_device(&mdev_state->vdev);
+ return ret;
+}
+
+static void mtty_release_dev(struct vfio_device *vdev)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+
+ atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
+ kfree(mdev_state->vconfig);
+ vfio_free_device(vdev);
+}
+
+static void mtty_remove(struct mdev_device *mdev)
+{
+ struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
+
+ vfio_unregister_group_dev(&mdev_state->vdev);
+ vfio_put_device(&mdev_state->vdev);
+}
+
+static int mtty_reset(struct mdev_state *mdev_state)
+{
+ pr_info("%s: called\n", __func__);
+
+ return 0;
+}
+
+static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
+ *ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
+ unsigned int index, unsigned int start,
+ unsigned int count, void *data)
+{
+ int ret = 0;
+
+ mutex_lock(&mdev_state->ops_lock);
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ {
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ pr_info("%s: disable INTx\n", __func__);
+ if (mdev_state->intx_evtfd)
+ eventfd_ctx_put(mdev_state->intx_evtfd);
+ break;
+ }
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+
+ if (fd > 0) {
+ struct eventfd_ctx *evt;
+
+ evt = eventfd_ctx_fdget(fd);
+ if (IS_ERR(evt)) {
+ ret = PTR_ERR(evt);
+ break;
+ }
+ mdev_state->intx_evtfd = evt;
+ mdev_state->irq_fd = fd;
+ mdev_state->irq_index = index;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ if (mdev_state->msi_evtfd)
+ eventfd_ctx_put(mdev_state->msi_evtfd);
+ pr_info("%s: disable MSI\n", __func__);
+ mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
+ break;
+ }
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+ struct eventfd_ctx *evt;
+
+ if (fd <= 0)
+ break;
+
+ if (mdev_state->msi_evtfd)
+ break;
+
+ evt = eventfd_ctx_fdget(fd);
+ if (IS_ERR(evt)) {
+ ret = PTR_ERR(evt);
+ break;
+ }
+ mdev_state->msi_evtfd = evt;
+ mdev_state->irq_fd = fd;
+ mdev_state->irq_index = index;
+ }
+ break;
+ }
+ break;
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ pr_info("%s: MSIX_IRQ\n", __func__);
+ break;
+ case VFIO_PCI_ERR_IRQ_INDEX:
+ pr_info("%s: ERR_IRQ\n", __func__);
+ break;
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ pr_info("%s: REQ_IRQ\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&mdev_state->ops_lock);
+ return ret;
+}
+
+static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
+{
+ int ret = -1;
+
+ if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
+ (!mdev_state->msi_evtfd))
+ return -EINVAL;
+ else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
+ (!mdev_state->intx_evtfd)) {
+ pr_info("%s: Intr eventfd not found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
+ ret = eventfd_signal(mdev_state->msi_evtfd, 1);
+ else
+ ret = eventfd_signal(mdev_state->intx_evtfd, 1);
+
+#if defined(DEBUG_INTR)
+ pr_info("Intx triggered\n");
+#endif
+ if (ret != 1)
+ pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static int mtty_get_region_info(struct mdev_state *mdev_state,
+ struct vfio_region_info *region_info,
+ u16 *cap_type_id, void **cap_type)
+{
+ unsigned int size = 0;
+ u32 bar_index;
+
+ bar_index = region_info->index;
+ if (bar_index >= VFIO_PCI_NUM_REGIONS)
+ return -EINVAL;
+
+ mutex_lock(&mdev_state->ops_lock);
+
+ switch (bar_index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ size = MTTY_CONFIG_SPACE_SIZE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ size = MTTY_IO_BAR_SIZE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ if (mdev_state->nr_ports == 2)
+ size = MTTY_IO_BAR_SIZE;
+ break;
+ default:
+ size = 0;
+ break;
+ }
+
+ mdev_state->region_info[bar_index].size = size;
+ mdev_state->region_info[bar_index].vfio_offset =
+ MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
+
+ region_info->size = size;
+ region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
+ region_info->flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ mutex_unlock(&mdev_state->ops_lock);
+ return 0;
+}
+
+static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
+{
+ switch (irq_info->index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ case VFIO_PCI_REQ_IRQ_INDEX:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
+ irq_info->count = 1;
+
+ if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
+ irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return 0;
+}
+
+static int mtty_get_device_info(struct vfio_device_info *dev_info)
+{
+ dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
+ dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
+ dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return 0;
+}
+
+static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mdev_state *mdev_state =
+ container_of(vdev, struct mdev_state, vdev);
+ int ret = 0;
+ unsigned long minsz;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = mtty_get_device_info(&info);
+ if (ret)
+ return ret;
+
+ memcpy(&mdev_state->dev_info, &info, sizeof(info));
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+ case VFIO_DEVICE_GET_REGION_INFO:
+ {
+ struct vfio_region_info info;
+ u16 cap_type_id = 0;
+ void *cap_type = NULL;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
+ &cap_type);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if ((info.argsz < minsz) ||
+ (info.index >= mdev_state->dev_info.num_irqs))
+ return -EINVAL;
+
+ ret = mtty_get_irq_info(&info);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+ }
+ case VFIO_DEVICE_SET_IRQS:
+ {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL, *ptr = NULL;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr,
+ mdev_state->dev_info.num_irqs,
+ VFIO_PCI_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ ptr = data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
+ hdr.count, data);
+
+ kfree(ptr);
+ return ret;
+ }
+ case VFIO_DEVICE_RESET:
+ return mtty_reset(mdev_state);
+ }
+ return -ENOTTY;
+}
+
+static ssize_t
+sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
+}
+
+static DEVICE_ATTR_RO(sample_mdev_dev);
+
+static struct attribute *mdev_dev_attrs[] = {
+ &dev_attr_sample_mdev_dev.attr,
+ NULL,
+};
+
+static const struct attribute_group mdev_dev_group = {
+ .name = "vendor",
+ .attrs = mdev_dev_attrs,
+};
+
+static const struct attribute_group *mdev_dev_groups[] = {
+ &mdev_dev_group,
+ NULL,
+};
+
+static unsigned int mtty_get_available(struct mdev_type *mtype)
+{
+ struct mtty_type *type = container_of(mtype, struct mtty_type, type);
+
+ return atomic_read(&mdev_avail_ports) / type->nr_ports;
+}
+
+static const struct vfio_device_ops mtty_dev_ops = {
+ .name = "vfio-mtty",
+ .init = mtty_init_dev,
+ .release = mtty_release_dev,
+ .read = mtty_read,
+ .write = mtty_write,
+ .ioctl = mtty_ioctl,
+};
+
+static struct mdev_driver mtty_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
+ .driver = {
+ .name = "mtty",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = mdev_dev_groups,
+ },
+ .probe = mtty_probe,
+ .remove = mtty_remove,
+ .get_available = mtty_get_available,
+};
+
+static void mtty_device_release(struct device *dev)
+{
+ dev_dbg(dev, "mtty: released\n");
+}
+
+static int __init mtty_dev_init(void)
+{
+ int ret = 0;
+
+ pr_info("mtty_dev: %s\n", __func__);
+
+ memset(&mtty_dev, 0, sizeof(mtty_dev));
+
+ idr_init(&mtty_dev.vd_idr);
+
+ ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
+ MTTY_NAME);
+
+ if (ret < 0) {
+ pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
+ return ret;
+ }
+
+ cdev_init(&mtty_dev.vd_cdev, &vd_fops);
+ cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
+
+ pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
+
+ ret = mdev_register_driver(&mtty_driver);
+ if (ret)
+ goto err_cdev;
+
+ mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
+
+ if (IS_ERR(mtty_dev.vd_class)) {
+ pr_err("Error: failed to register mtty_dev class\n");
+ ret = PTR_ERR(mtty_dev.vd_class);
+ goto err_driver;
+ }
+
+ mtty_dev.dev.class = mtty_dev.vd_class;
+ mtty_dev.dev.release = mtty_device_release;
+ dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
+
+ ret = device_register(&mtty_dev.dev);
+ if (ret)
+ goto err_class;
+
+ ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev,
+ &mtty_driver, mtty_mdev_types,
+ ARRAY_SIZE(mtty_mdev_types));
+ if (ret)
+ goto err_device;
+ return 0;
+
+err_device:
+ device_unregister(&mtty_dev.dev);
+err_class:
+ class_destroy(mtty_dev.vd_class);
+err_driver:
+ mdev_unregister_driver(&mtty_driver);
+err_cdev:
+ cdev_del(&mtty_dev.vd_cdev);
+ unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
+ return ret;
+}
+
+static void __exit mtty_dev_exit(void)
+{
+ mtty_dev.dev.bus = NULL;
+ mdev_unregister_parent(&mtty_dev.parent);
+
+ device_unregister(&mtty_dev.dev);
+ idr_destroy(&mtty_dev.vd_idr);
+ mdev_unregister_driver(&mtty_driver);
+ cdev_del(&mtty_dev.vd_cdev);
+ unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
+ class_destroy(mtty_dev.vd_class);
+ mtty_dev.vd_class = NULL;
+ pr_info("mtty_dev: Unloaded!\n");
+}
+
+module_init(mtty_dev_init)
+module_exit(mtty_dev_exit)
+
+MODULE_LICENSE("GPL v2");
+MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
+MODULE_VERSION(VERSION_STRING);
+MODULE_AUTHOR(DRIVER_AUTHOR);
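
To actually see mtty_trigger_interrupt() deliver an event, userspace registers an eventfd through VFIO_DEVICE_SET_IRQS, matching the eventfd path in mtty_set_irqs() above. A sketch of the INTx trigger setup (device_fd is assumed to be an open VFIO device fd, <sys/eventfd.h> and <linux/vfio.h> included; error handling omitted):

    int efd = eventfd(0, EFD_CLOEXEC);
    char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
    struct vfio_irq_set *set = (struct vfio_irq_set *)buf;

    set->argsz = sizeof(buf);
    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    set->index = VFIO_PCI_INTX_IRQ_INDEX;
    set->start = 0;
    set->count = 1;
    memcpy(set->data, &efd, sizeof(int));
    ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    /* reads on efd now report interrupts raised by the driver */
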
diff --git a/samples/vfs/.gitignore b/samples/vfs/.gitignore
new file mode 100644
index 000000000..79212d912
--- /dev/null
+++ b/samples/vfs/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/test-fsmount
+/test-statx
diff --git a/samples/vfs/Makefile b/samples/vfs/Makefile
new file mode 100644
index 000000000..6377a6781
--- /dev/null
+++ b/samples/vfs/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs-always-y += test-fsmount test-statx
+
+userccflags += -I usr/include
diff --git a/samples/vfs/test-fsmount.c b/samples/vfs/test-fsmount.c
new file mode 100644
index 000000000..50f47b72e
--- /dev/null
+++ b/samples/vfs/test-fsmount.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* fd-based mount test.
+ *
+ * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <linux/mount.h>
+#include <linux/unistd.h>
+
+#define E(x) do { if ((x) == -1) { perror(#x); exit(1); } } while(0)
+
+static void check_messages(int fd)
+{
+ char buf[4096];
+ int err, n;
+
+ err = errno;
+
+ for (;;) {
+ n = read(fd, buf, sizeof(buf));
+ if (n < 0)
+ break;
+ n -= 2;
+
+ switch (buf[0]) {
+ case 'e':
+ fprintf(stderr, "Error: %*.*s\n", n, n, buf + 2);
+ break;
+ case 'w':
+ fprintf(stderr, "Warning: %*.*s\n", n, n, buf + 2);
+ break;
+ case 'i':
+ fprintf(stderr, "Info: %*.*s\n", n, n, buf + 2);
+ break;
+ }
+ }
+
+ errno = err;
+}
+
+static __attribute__((noreturn))
+void mount_error(int fd, const char *s)
+{
+ check_messages(fd);
+ fprintf(stderr, "%s: %m\n", s);
+ exit(1);
+}
+
+/* Hope -1 isn't a syscall */
+#ifndef __NR_fsopen
+#define __NR_fsopen -1
+#endif
+#ifndef __NR_fsmount
+#define __NR_fsmount -1
+#endif
+#ifndef __NR_fsconfig
+#define __NR_fsconfig -1
+#endif
+#ifndef __NR_move_mount
+#define __NR_move_mount -1
+#endif
+
+static inline int fsopen(const char *fs_name, unsigned int flags)
+{
+ return syscall(__NR_fsopen, fs_name, flags);
+}
+
+static inline int fsmount(int fsfd, unsigned int flags, unsigned int ms_flags)
+{
+ return syscall(__NR_fsmount, fsfd, flags, ms_flags);
+}
+
+static inline int fsconfig(int fsfd, unsigned int cmd,
+ const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fsfd, cmd, key, val, aux);
+}
+
+static inline int move_mount(int from_dfd, const char *from_pathname,
+ int to_dfd, const char *to_pathname,
+ unsigned int flags)
+{
+ return syscall(__NR_move_mount,
+ from_dfd, from_pathname,
+ to_dfd, to_pathname, flags);
+}
+
+#define E_fsconfig(fd, cmd, key, val, aux) \
+ do { \
+ if (fsconfig(fd, cmd, key, val, aux) == -1) \
+ mount_error(fd, key ?: "create"); \
+ } while (0)
+
+int main(int argc, char *argv[])
+{
+ int fsfd, mfd;
+
+	/* Mount a publicly available AFS filesystem */
+ fsfd = fsopen("afs", 0);
+ if (fsfd == -1) {
+ perror("fsopen");
+ exit(1);
+ }
+
+ E_fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "#grand.central.org:root.cell.", 0);
+ E_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+
+ mfd = fsmount(fsfd, 0, MOUNT_ATTR_RDONLY);
+ if (mfd < 0)
+ mount_error(fsfd, "fsmount");
+ E(close(fsfd));
+
+ if (move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH) < 0) {
+ perror("move_mount");
+ exit(1);
+ }
+
+ E(close(mfd));
+ exit(0);
+}
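
The same fsopen/fsconfig/fsmount/move_mount sequence works for any filesystem converted to the new mount API. As a second worked example, a sketch reusing the wrappers above (the "size" option is tmpfs-specific), mounting a 16 MB tmpfs instance:

    int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
    E_fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
    E_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
    int mfd = fsmount(fsfd, 0, 0);
    E(move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH));
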
diff --git a/samples/vfs/test-statx.c b/samples/vfs/test-statx.c
new file mode 100644
index 000000000..49c7a46ce
--- /dev/null
+++ b/samples/vfs/test-statx.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Test the statx() system call.
+ *
+ * Note that the output of this program is intended to look like the output of
+ * /bin/stat where possible.
+ *
+ * Copyright (C) 2015 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define _GNU_SOURCE
+#define _ATFILE_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#define statx foo
+#define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
+#include <sys/stat.h>
+#undef statx
+#undef statx_timestamp
+
+#define AT_STATX_SYNC_TYPE 0x6000
+#define AT_STATX_SYNC_AS_STAT 0x0000
+#define AT_STATX_FORCE_SYNC 0x2000
+#define AT_STATX_DONT_SYNC 0x4000
+
+#ifndef __NR_statx
+#define __NR_statx -1
+#endif
+
+static __attribute__((unused))
+ssize_t statx(int dfd, const char *filename, unsigned flags,
+ unsigned int mask, struct statx *buffer)
+{
+ return syscall(__NR_statx, dfd, filename, flags, mask, buffer);
+}
+
+static void print_time(const char *field, struct statx_timestamp *ts)
+{
+ struct tm tm;
+ time_t tim;
+ char buffer[100];
+ int len;
+
+ tim = ts->tv_sec;
+ if (!localtime_r(&tim, &tm)) {
+ perror("localtime_r");
+ exit(1);
+ }
+ len = strftime(buffer, 100, "%F %T", &tm);
+ if (len == 0) {
+ perror("strftime");
+ exit(1);
+ }
+ printf("%s", field);
+ fwrite(buffer, 1, len, stdout);
+ printf(".%09u", ts->tv_nsec);
+ len = strftime(buffer, 100, "%z", &tm);
+ if (len == 0) {
+ perror("strftime2");
+ exit(1);
+ }
+ fwrite(buffer, 1, len, stdout);
+ printf("\n");
+}
+
+static void dump_statx(struct statx *stx)
+{
+ char buffer[256], ft = '?';
+
+ printf("results=%x\n", stx->stx_mask);
+
+ printf(" ");
+ if (stx->stx_mask & STATX_SIZE)
+ printf(" Size: %-15llu", (unsigned long long)stx->stx_size);
+ if (stx->stx_mask & STATX_BLOCKS)
+ printf(" Blocks: %-10llu", (unsigned long long)stx->stx_blocks);
+ printf(" IO Block: %-6llu", (unsigned long long)stx->stx_blksize);
+ if (stx->stx_mask & STATX_TYPE) {
+ switch (stx->stx_mode & S_IFMT) {
+ case S_IFIFO: printf(" FIFO\n"); ft = 'p'; break;
+ case S_IFCHR: printf(" character special file\n"); ft = 'c'; break;
+ case S_IFDIR: printf(" directory\n"); ft = 'd'; break;
+ case S_IFBLK: printf(" block special file\n"); ft = 'b'; break;
+ case S_IFREG: printf(" regular file\n"); ft = '-'; break;
+ case S_IFLNK: printf(" symbolic link\n"); ft = 'l'; break;
+ case S_IFSOCK: printf(" socket\n"); ft = 's'; break;
+ default:
+ printf(" unknown type (%o)\n", stx->stx_mode & S_IFMT);
+ break;
+ }
+ } else {
+ printf(" no type\n");
+ }
+
+ sprintf(buffer, "%02x:%02x", stx->stx_dev_major, stx->stx_dev_minor);
+ printf("Device: %-15s", buffer);
+ if (stx->stx_mask & STATX_INO)
+ printf(" Inode: %-11llu", (unsigned long long) stx->stx_ino);
+ if (stx->stx_mask & STATX_NLINK)
+ printf(" Links: %-5u", stx->stx_nlink);
+ if (stx->stx_mask & STATX_TYPE) {
+ switch (stx->stx_mode & S_IFMT) {
+ case S_IFBLK:
+ case S_IFCHR:
+ printf(" Device type: %u,%u",
+ stx->stx_rdev_major, stx->stx_rdev_minor);
+ break;
+ }
+ }
+ printf("\n");
+
+ if (stx->stx_mask & STATX_MODE)
+ printf("Access: (%04o/%c%c%c%c%c%c%c%c%c%c) ",
+ stx->stx_mode & 07777,
+ ft,
+ stx->stx_mode & S_IRUSR ? 'r' : '-',
+ stx->stx_mode & S_IWUSR ? 'w' : '-',
+ stx->stx_mode & S_IXUSR ? 'x' : '-',
+ stx->stx_mode & S_IRGRP ? 'r' : '-',
+ stx->stx_mode & S_IWGRP ? 'w' : '-',
+ stx->stx_mode & S_IXGRP ? 'x' : '-',
+ stx->stx_mode & S_IROTH ? 'r' : '-',
+ stx->stx_mode & S_IWOTH ? 'w' : '-',
+ stx->stx_mode & S_IXOTH ? 'x' : '-');
+ if (stx->stx_mask & STATX_UID)
+ printf("Uid: %5d ", stx->stx_uid);
+ if (stx->stx_mask & STATX_GID)
+ printf("Gid: %5d\n", stx->stx_gid);
+
+ if (stx->stx_mask & STATX_ATIME)
+ print_time("Access: ", &stx->stx_atime);
+ if (stx->stx_mask & STATX_MTIME)
+ print_time("Modify: ", &stx->stx_mtime);
+ if (stx->stx_mask & STATX_CTIME)
+ print_time("Change: ", &stx->stx_ctime);
+ if (stx->stx_mask & STATX_BTIME)
+ print_time(" Birth: ", &stx->stx_btime);
+
+ if (stx->stx_attributes_mask) {
+ unsigned char bits, mbits;
+ int loop, byte;
+
+ static char attr_representation[64 + 1] =
+ /* STATX_ATTR_ flags: */
+ "????????" /* 63-56 */
+ "????????" /* 55-48 */
+ "????????" /* 47-40 */
+ "????????" /* 39-32 */
+ "????????" /* 31-24 0x00000000-ff000000 */
+ "????????" /* 23-16 0x00000000-00ff0000 */
+ "???me???" /* 15- 8 0x00000000-0000ff00 */
+ "?dai?c??" /* 7- 0 0x00000000-000000ff */
+ ;
+
+ printf("Attributes: %016llx (",
+ (unsigned long long)stx->stx_attributes);
+ for (byte = 64 - 8; byte >= 0; byte -= 8) {
+ bits = stx->stx_attributes >> byte;
+ mbits = stx->stx_attributes_mask >> byte;
+ for (loop = 7; loop >= 0; loop--) {
+ int bit = byte + loop;
+
+ if (!(mbits & 0x80))
+ putchar('.'); /* Not supported */
+ else if (bits & 0x80)
+ putchar(attr_representation[63 - bit]);
+ else
+ putchar('-'); /* Not set */
+ bits <<= 1;
+ mbits <<= 1;
+ }
+ if (byte)
+ putchar(' ');
+ }
+ printf(")\n");
+ }
+}
+
+static void dump_hex(unsigned long long *data, int from, int to)
+{
+ unsigned offset, print_offset = 1, col = 0;
+
+ from /= 8;
+ to = (to + 7) / 8;
+
+ for (offset = from; offset < to; offset++) {
+ if (print_offset) {
+ printf("%04x: ", offset * 8);
+ print_offset = 0;
+ }
+ printf("%016llx", data[offset]);
+ col++;
+ if ((col & 3) == 0) {
+ printf("\n");
+ print_offset = 1;
+ } else {
+ printf(" ");
+ }
+ }
+
+ if (!print_offset)
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ struct statx stx;
+ int ret, raw = 0, atflag = AT_SYMLINK_NOFOLLOW;
+
+ unsigned int mask = STATX_BASIC_STATS | STATX_BTIME;
+
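+	/* Option flags tweak the statx() call: -F forces attribute sync,
+	 * -D suppresses it, -L follows symlinks, -O drops the basic stats
+	 * from the request mask, -A blocks automounting and -R hex-dumps
+	 * the raw structure. Non-option arguments are stat'd in order.
+	 */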
+ for (argv++; *argv; argv++) {
+ if (strcmp(*argv, "-F") == 0) {
+ atflag &= ~AT_STATX_SYNC_TYPE;
+ atflag |= AT_STATX_FORCE_SYNC;
+ continue;
+ }
+ if (strcmp(*argv, "-D") == 0) {
+ atflag &= ~AT_STATX_SYNC_TYPE;
+ atflag |= AT_STATX_DONT_SYNC;
+ continue;
+ }
+ if (strcmp(*argv, "-L") == 0) {
+ atflag &= ~AT_SYMLINK_NOFOLLOW;
+ continue;
+ }
+ if (strcmp(*argv, "-O") == 0) {
+ mask &= ~STATX_BASIC_STATS;
+ continue;
+ }
+ if (strcmp(*argv, "-A") == 0) {
+ atflag |= AT_NO_AUTOMOUNT;
+ continue;
+ }
+ if (strcmp(*argv, "-R") == 0) {
+ raw = 1;
+ continue;
+ }
+
+ memset(&stx, 0xbf, sizeof(stx));
+ ret = statx(AT_FDCWD, *argv, atflag, mask, &stx);
+ printf("statx(%s) = %d\n", *argv, ret);
+ if (ret < 0) {
+ perror(*argv);
+ exit(1);
+ }
+
+ if (raw)
+ dump_hex((unsigned long long *)&stx, 0, sizeof(stx));
+
+ dump_statx(&stx);
+ }
+ return 0;
+}
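dump_statx() above only trusts a field once its bit comes back set in
stx_mask, since a filesystem may omit anything it was asked for. A minimal
sketch of that request-then-verify pattern, assuming a glibc recent enough
(2.28+) to supply the statx() wrapper so no raw-syscall shim is needed:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (statx(AT_FDCWD, argv[1], AT_SYMLINK_NOFOLLOW,
		  STATX_BTIME, &stx) == -1) {
		perror("statx");
		return 1;
	}
	/* Only valid if the kernel set the bit on the way back. */
	if (stx.stx_mask & STATX_BTIME)
		printf("born: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec,
		       stx.stx_btime.tv_nsec);
	else
		printf("filesystem does not report a birth time\n");
	return 0;
}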
diff --git a/samples/watch_queue/.gitignore b/samples/watch_queue/.gitignore
new file mode 100644
index 000000000..823b351d3
--- /dev/null
+++ b/samples/watch_queue/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/watch_test
diff --git a/samples/watch_queue/Makefile b/samples/watch_queue/Makefile
new file mode 100644
index 000000000..c0db3a6bc
--- /dev/null
+++ b/samples/watch_queue/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs-always-y += watch_test
+
+userccflags += -I usr/include
diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c
new file mode 100644
index 000000000..8c6cb57d5
--- /dev/null
+++ b/samples/watch_queue/watch_test.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Use watch_queue API to watch for notifications.
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <limits.h>
+#include <linux/watch_queue.h>
+#include <linux/unistd.h>
+#include <linux/keyctl.h>
+
+#ifndef KEYCTL_WATCH_KEY
+#define KEYCTL_WATCH_KEY -1
+#endif
+#ifndef __NR_keyctl
+#define __NR_keyctl -1
+#endif
+
+#define BUF_SIZE 256
+
+static long keyctl_watch_key(int key, int watch_fd, int watch_id)
+{
+ return syscall(__NR_keyctl, KEYCTL_WATCH_KEY, key, watch_fd, watch_id);
+}
+
+static const char *key_subtypes[256] = {
+ [NOTIFY_KEY_INSTANTIATED] = "instantiated",
+ [NOTIFY_KEY_UPDATED] = "updated",
+ [NOTIFY_KEY_LINKED] = "linked",
+ [NOTIFY_KEY_UNLINKED] = "unlinked",
+ [NOTIFY_KEY_CLEARED] = "cleared",
+ [NOTIFY_KEY_REVOKED] = "revoked",
+ [NOTIFY_KEY_INVALIDATED] = "invalidated",
+ [NOTIFY_KEY_SETATTR] = "setattr",
+};
+
+static void saw_key_change(struct watch_notification *n, size_t len)
+{
+ struct key_notification *k = (struct key_notification *)n;
+
+ if (len != sizeof(struct key_notification)) {
+ fprintf(stderr, "Incorrect key message length\n");
+ return;
+ }
+
+ printf("KEY %08x change=%u[%s] aux=%u\n",
+ k->key_id, n->subtype, key_subtypes[n->subtype], k->aux);
+}
+
+/*
+ * Consume and display events.
+ */
+static void consumer(int fd)
+{
+ unsigned char buffer[433], *p, *end;
+ union {
+ struct watch_notification n;
+ unsigned char buf1[128];
+ } n;
+ ssize_t buf_len;
+
+ for (;;) {
+ buf_len = read(fd, buffer, sizeof(buffer));
+ if (buf_len == -1) {
+ perror("read");
+ exit(1);
+ }
+
+ if (buf_len == 0) {
+ printf("-- END --\n");
+ return;
+ }
+
+ if (buf_len > sizeof(buffer)) {
+ fprintf(stderr, "Read buffer overrun: %zd\n", buf_len);
+ return;
+ }
+
+ printf("read() = %zd\n", buf_len);
+
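+		/* A single read() can return several variable-length
+		 * records; each one encodes its own byte length in the
+		 * WATCH_INFO_LENGTH bits of info, so walk the buffer
+		 * record by record.
+		 */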
+ p = buffer;
+ end = buffer + buf_len;
+ while (p < end) {
+ size_t largest, len;
+
+ largest = end - p;
+ if (largest > 128)
+ largest = 128;
+ if (largest < sizeof(struct watch_notification)) {
+ fprintf(stderr, "Short message header: %zu\n", largest);
+ return;
+ }
+ memcpy(&n, p, largest);
+
+ printf("NOTIFY[%03zx]: ty=%06x sy=%02x i=%08x\n",
+ p - buffer, n.n.type, n.n.subtype, n.n.info);
+
+ len = n.n.info & WATCH_INFO_LENGTH;
+ if (len < sizeof(n.n) || len > largest) {
+ fprintf(stderr, "Bad message length: %zu/%zu\n", len, largest);
+ exit(1);
+ }
+
+ switch (n.n.type) {
+ case WATCH_TYPE_META:
+ switch (n.n.subtype) {
+ case WATCH_META_REMOVAL_NOTIFICATION:
+ printf("REMOVAL of watchpoint %08x\n",
+ (n.n.info & WATCH_INFO_ID) >>
+ WATCH_INFO_ID__SHIFT);
+ break;
+ case WATCH_META_LOSS_NOTIFICATION:
+ printf("-- LOSS --\n");
+ break;
+ default:
+ printf("other meta record\n");
+ break;
+ }
+ break;
+ case WATCH_TYPE_KEY_NOTIFY:
+ saw_key_change(&n.n, len);
+ break;
+ default:
+ printf("other type\n");
+ break;
+ }
+
+ p += len;
+ }
+ }
+}
+
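+/* Only pass key notifications through the pipe; subtype_filter[0] is a
+ * bitmask of accepted subtypes 0-31, so UINT_MAX lets every NOTIFY_KEY_*
+ * subtype through.
+ */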
+static struct watch_notification_filter filter = {
+ .nr_filters = 1,
+ .filters = {
+ [0] = {
+ .type = WATCH_TYPE_KEY_NOTIFY,
+ .subtype_filter[0] = UINT_MAX,
+ },
+ },
+};
+
+int main(int argc, char **argv)
+{
+ int pipefd[2], fd;
+
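+	/* The read end of an O_NOTIFICATION_PIPE pipe is the event source:
+	 * size its ring buffer, restrict it to key events, then attach
+	 * watches to the session and user keyrings.
+	 */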
+ if (pipe2(pipefd, O_NOTIFICATION_PIPE) == -1) {
+ perror("pipe2");
+ exit(1);
+ }
+ fd = pipefd[0];
+
+ if (ioctl(fd, IOC_WATCH_QUEUE_SET_SIZE, BUF_SIZE) == -1) {
+ perror("watch_queue(size)");
+ exit(1);
+ }
+
+ if (ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter) == -1) {
+ perror("watch_queue(filter)");
+ exit(1);
+ }
+
+ if (keyctl_watch_key(KEY_SPEC_SESSION_KEYRING, fd, 0x01) == -1) {
+ perror("keyctl");
+ exit(1);
+ }
+
+ if (keyctl_watch_key(KEY_SPEC_USER_KEYRING, fd, 0x02) == -1) {
+ perror("keyctl");
+ exit(1);
+ }
+
+ consumer(fd);
+ exit(0);
+}
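watch_test only prints when something touches the watched keyrings. A
hypothetical companion program (not part of this patch) that should provoke
a NOTIFY_KEY_LINKED and a NOTIFY_KEY_UNLINKED on the session keyring via
the raw key-management syscalls; the exact set of events delivered can vary
by kernel version:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	/* Linking a fresh key into the watched session keyring, then
	 * unlinking it again, should surface as two keyring events. */
	long id = syscall(__NR_add_key, "user", "watch-demo", "payload", 7,
			  KEY_SPEC_SESSION_KEYRING);

	if (id == -1) {
		perror("add_key");
		return 1;
	}
	if (syscall(__NR_keyctl, KEYCTL_UNLINK, id,
		    KEY_SPEC_SESSION_KEYRING) == -1) {
		perror("keyctl_unlink");
		return 1;
	}
	return 0;
}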
diff --git a/samples/watchdog/.gitignore b/samples/watchdog/.gitignore
new file mode 100644
index 000000000..a70a0150e
--- /dev/null
+++ b/samples/watchdog/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/watchdog-simple
diff --git a/samples/watchdog/Makefile b/samples/watchdog/Makefile
new file mode 100644
index 000000000..ab39d23dc
--- /dev/null
+++ b/samples/watchdog/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+userprogs-always-y += watchdog-simple
diff --git a/samples/watchdog/watchdog-simple.c b/samples/watchdog/watchdog-simple.c
new file mode 100644
index 000000000..9ce66d2ca
--- /dev/null
+++ b/samples/watchdog/watchdog-simple.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+int main(void)
+{
+ int fd = open("/dev/watchdog", O_WRONLY);
+ int ret = 0;
+ if (fd == -1) {
+ perror("watchdog");
+ exit(EXIT_FAILURE);
+ }
+ while (1) {
+ ret = write(fd, "\0", 1);
+ if (ret != 1) {
+ ret = -1;
+ break;
+ }
+ sleep(10);
+ }
+ close(fd);
+ return ret;
+}
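The sample above uses the classic write-to-ping interface. A variant sketch
using the ioctl API from <linux/watchdog.h> instead; it assumes the driver
implements WDIOC_SETTIMEOUT and honours the magic 'V' close (i.e. nowayout
is off), neither of which every watchdog guarantees:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30, i;

	if (fd == -1) {
		perror("watchdog");
		exit(EXIT_FAILURE);
	}
	/* Ask for an explicit timeout; the driver may clamp the value. */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d seconds\n", timeout);
	for (i = 0; i < 6; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping without write() */
		sleep(10);
	}
	/* Magic close: disarm the timer so exiting does not reboot. */
	write(fd, "V", 1);
	close(fd);
	return 0;
}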