summaryrefslogtreecommitdiffstats
path: root/collectors/ebpf.plugin
diff options
context:
space:
mode:
Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--collectors/ebpf.plugin/README.md8
-rw-r--r--collectors/ebpf.plugin/ebpf.c53
-rw-r--r--collectors/ebpf.plugin/ebpf.d.conf3
-rw-r--r--collectors/ebpf.plugin/ebpf.d/cachestat.conf13
-rw-r--r--collectors/ebpf.plugin/ebpf.d/dcstat.conf13
-rw-r--r--collectors/ebpf.plugin/ebpf.d/swap.conf13
-rw-r--r--collectors/ebpf.plugin/ebpf_apps.c3
-rw-r--r--collectors/ebpf.plugin/ebpf_cachestat.c309
-rw-r--r--collectors/ebpf.plugin/ebpf_cachestat.h10
-rw-r--r--collectors/ebpf.plugin/ebpf_dcstat.c224
-rw-r--r--collectors/ebpf.plugin/ebpf_dcstat.h9
-rw-r--r--collectors/ebpf.plugin/ebpf_oomkill.c9
-rw-r--r--collectors/ebpf.plugin/ebpf_process.c3
-rw-r--r--collectors/ebpf.plugin/ebpf_socket.c4
-rw-r--r--collectors/ebpf.plugin/ebpf_swap.c190
-rw-r--r--collectors/ebpf.plugin/ebpf_swap.h1
-rw-r--r--collectors/ebpf.plugin/ebpf_sync.c55
17 files changed, 856 insertions, 64 deletions
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index c32133b1c..dc406b7f8 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -29,7 +29,7 @@ Netdata uses the following features from the Linux kernel to run eBPF programs:
- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
both options are available.
- Trampolines are bridges between kernel functions, and BPF programs. Netdata uses them by default whenever available.
-- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global) mode, and this will allow users to monitor return from these functions and detect possible failures.
+- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global-configuration-options) mode, and this will allow users to monitor return from these functions and detect possible failures.
In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is run instead, performing various collection logic before letting the kernel continue its normal control flow.
@@ -137,7 +137,7 @@ _enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to
If you do not need to monitor specific metrics for your `cgroups`, you can enable `cgroups` inside
`ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the
-[Configuration](#configuration) section.
+[Configuration](#configuring-ebpfplugin) section.
#### Integration Dashboard Elements
@@ -419,7 +419,7 @@ collected in the previous and current seconds.
### System overview
Not all charts within the System Overview menu are enabled by default. Charts that rely on `kprobes` are disabled by default because they add around 100ns overhead for each function call. This is a small number from a human's perspective, but the functions are called many times and create an impact
-on host. See the [configuration](#configuration) section for details about how to enable them.
+on host. See the [configuration](#configuring-ebpfplugin) section for details about how to enable them.
#### Processes
@@ -863,7 +863,7 @@ eBPF monitoring is complex and produces a large volume of metrics. We've discove
significantly increases kernel memory usage by several hundred MB.
If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
-consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuration). Next,
+consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin). Next,
[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
usage (see the `system.ram` chart) has dropped significantly.
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index b93c2dfd7..2b25f50a3 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_CACHESTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18|
NETDATA_V5_4 | NETDATA_V5_15 | NETDATA_V5_16,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
@@ -76,7 +76,7 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
.config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = dc_targets},
{ .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -84,7 +84,7 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
.config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = swap_targets},
{ .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -1083,10 +1083,32 @@ int ebpf_start_pthread_variables()
}
/**
+ * Am I collecting PIDs?
+ *
+ * Test if eBPF plugin needs to collect PID information.
+ *
+ * @return It returns 1 if at least one thread needs to collect the data, or zero otherwise.
+ */
+static inline uint32_t ebpf_am_i_collect_pids()
+{
+ uint32_t ret = 0;
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ret |= ebpf_modules[i].cgroup_charts | ebpf_modules[i].apps_charts;
+ }
+
+ return ret;
+}
+
+/**
* Allocate the vectors used for all threads.
*/
static void ebpf_allocate_common_vectors()
{
+ if (unlikely(!ebpf_am_i_collect_pids())) {
+ return;
+ }
+
all_pids = callocz((size_t)pid_max, sizeof(struct pid_stat *));
global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t));
}
@@ -1172,23 +1194,6 @@ static inline void epbf_update_load_mode(char *str)
ebpf_set_load_mode(load);
}
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Set default btf file
- *
- * Load the default BTF file on environment.
- */
-static void ebpf_set_default_btf_file()
-{
- char path[PATH_MAX + 1];
- snprintfz(path, PATH_MAX, "%s/vmlinux", btf_path);
- default_btf = ebpf_parse_btf_file(path);
- if (!default_btf)
- info("Your environment does not have BTF file %s/vmlinux. The plugin will work with 'legacy' code.",
- btf_path);
-}
-#endif
-
/**
* Read collector values
*
@@ -1210,10 +1215,10 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups, int u
how_to_load(value);
btf_path = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_PROGRAM_PATH,
- EBPF_DEFAULT_BTF_FILE);
+ EBPF_DEFAULT_BTF_PATH);
#ifdef LIBBPF_MAJOR_VERSION
- ebpf_set_default_btf_file();
+ default_btf = ebpf_load_btf_file(btf_path, EBPF_DEFAULT_BTF_FILE);
#endif
value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, EBPF_CFG_DEFAULT_PROGRAM);
@@ -1444,8 +1449,6 @@ void set_global_variables()
/**
* Load collector config
- *
- * @param lmode the mode that will be used for them.
*/
static inline void ebpf_load_thread_config()
{
@@ -1881,6 +1884,8 @@ static void ebpf_manage_pid(pid_t pid)
*/
int main(int argc, char **argv)
{
+ clocks_init();
+
set_global_variables();
ebpf_parse_args(argc, argv);
ebpf_manage_pid(getpid());
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index 0ca9ff0d6..aeba473ed 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -21,6 +21,7 @@
cgroups = no
update every = 5
pid table size = 32768
+ btf path = /sys/kernel/btf/
#
# eBPF Programs
@@ -57,7 +58,7 @@
oomkill = yes
process = yes
shm = yes
- socket = no # Disabled while we are fixing race condition
+ socket = yes
softirq = yes
sync = yes
swap = no
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
index 41205930a..e2418394e 100644
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
@@ -10,10 +10,21 @@
#
# The `pid table size` defines the maximum number of PIDs stored inside the application hash table.
#
+# The `ebpf type format` option accepts the following values:
+#  `auto`  : The eBPF collector will investigate the hardware and select between the next two options.
+#  `legacy`: The eBPF collector will load the legacy code. Note: This has a higher overhead.
+#  `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#  `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead added to the host.
+#  `probe`     : This is the same as legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
index a65e0acbc..3986ae4f8 100644
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
@@ -8,10 +8,21 @@
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
# the setting `apps` and `cgroups` to 'no'.
#
+# The `ebpf type format` option accepts the following values:
+#  `auto`  : The eBPF collector will investigate the hardware and select between the next two options.
+#  `legacy`: The eBPF collector will load the legacy code. Note: This has a higher overhead.
+#  `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#  `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead added to the host.
+#  `probe`     : This is the same as legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
index a65e0acbc..3986ae4f8 100644
--- a/collectors/ebpf.plugin/ebpf.d/swap.conf
+++ b/collectors/ebpf.plugin/ebpf.d/swap.conf
@@ -8,10 +8,21 @@
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
# the setting `apps` and `cgroups` to 'no'.
#
+# The `ebpf type format` option accepts the following values:
+#  `auto`  : The eBPF collector will investigate the hardware and select between the next two options.
+#  `legacy`: The eBPF collector will load the legacy code. Note: This has a higher overhead.
+#  `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+#  `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead added to the host.
+#  `probe`     : This is the same as legacy code.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
+ ebpf type format = auto
+ ebpf co-re tracing = trampoline
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index abc112642..2c65db8d1 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -1091,6 +1091,9 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p,
*/
void collect_data_for_all_processes(int tbl_pid_stats_fd)
{
+ if (unlikely(!all_pids))
+ return;
+
struct pid_stat *pids = root_of_pids; // global list of all processes running
while (pids) {
if (pids->updated_twice) {
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index ed4c14288..b565f635f 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -45,6 +45,248 @@ struct config cachestat_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
+netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "mark_buffer_dirty", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+
+#ifdef LIBBPF_MAJOR_VERSION
+#include "includes/cachestat.skel.h" // BTF code
+
+static struct cachestat_bpf *bpf_obj = NULL;
+
+/**
+ * Disable probe
+ *
+ * Disable all probes to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_kprobe, false);
+}
+
+/*
+ * Disable specific probe
+ *
+ * Disable probes according the kernel version
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static void ebpf_cachestat_disable_specific_probe(struct cachestat_bpf *obj)
+{
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
+ }
+}
+
+/*
+ * Disable trampoline
+ *
+ * Disable all trampoline to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_fentry, false);
+}
+
+/*
+ * Disable specific trampoline
+ *
+ * Disable trampoline according to kernel version.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_disable_specific_trampoline(struct cachestat_bpf *obj)
+{
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
+ }
+}
+
+/**
+ * Set trampoline target
+ *
+ * Set the targets we will monitor.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj)
+{
+ bpf_program__set_attach_target(obj->progs.netdata_add_to_page_cache_lru_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
+
+ bpf_program__set_attach_target(obj->progs.netdata_mark_page_accessed_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
+
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ bpf_program__set_attach_target(obj->progs.netdata_folio_mark_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ bpf_program__set_attach_target(obj->progs.netdata_set_page_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ } else {
+ bpf_program__set_attach_target(obj->progs.netdata_account_page_dirtied_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ }
+
+ bpf_program__set_attach_target(obj->progs.netdata_mark_buffer_dirty_fentry, 0,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
+}
+
+/**
+ * Mount Attach Probe
+ *
+ * Attach probes to target
+ *
+ * @param obj is the main structure for bpf objects.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
+{
+ obj->links.netdata_add_to_page_cache_lru_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_add_to_page_cache_lru_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
+ int ret = libbpf_get_error(obj->links.netdata_add_to_page_cache_lru_kprobe);
+ if (ret)
+ return -1;
+
+ obj->links.netdata_mark_page_accessed_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_page_accessed_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
+ ret = libbpf_get_error(obj->links.netdata_mark_page_accessed_kprobe);
+ if (ret)
+ return -1;
+
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ obj->links.netdata_folio_mark_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_folio_mark_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_folio_mark_dirty_kprobe);
+ } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ obj->links.netdata_set_page_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_set_page_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_set_page_dirty_kprobe);
+ } else {
+ obj->links.netdata_account_page_dirtied_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_account_page_dirtied_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
+ ret = libbpf_get_error(obj->links.netdata_account_page_dirtied_kprobe);
+ }
+
+ if (ret)
+ return -1;
+
+ obj->links.netdata_mark_buffer_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_buffer_dirty_kprobe,
+ false,
+ cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
+ ret = libbpf_get_error(obj->links.netdata_mark_buffer_dirty_kprobe);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Adjust Map Size
+ *
+ * Resize maps according to input from users.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ */
+static void ebpf_cachestat_adjust_map_size(struct cachestat_bpf *obj, ebpf_module_t *em)
+{
+ ebpf_update_map_size(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS],
+ em, bpf_map__name(obj->maps.cstat_pid));
+}
+
+/**
+ * Set hash tables
+ *
+ * Set the values for maps according to the values given by the kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_cachestat_set_hash_tables(struct cachestat_bpf *obj)
+{
+ cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.cstat_global);
+ cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.cstat_pid);
+ cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd = bpf_map__fd(obj->maps.cstat_ctrl);
+}
+
+/**
+ * Load and attach
+ *
+ * Load and attach the eBPF code in kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf_module_t *em)
+{
+ netdata_ebpf_targets_t *mt = em->targets;
+ netdata_ebpf_program_loaded_t test = mt[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode;
+
+ if (test == EBPF_LOAD_TRAMPOLINE) {
+ ebpf_cachestat_disable_probe(obj);
+ ebpf_cachestat_disable_specific_trampoline(obj);
+
+ netdata_set_trampoline_target(obj);
+ } else {
+ ebpf_cachestat_disable_trampoline(obj);
+ ebpf_cachestat_disable_specific_probe(obj);
+ }
+
+ int ret = cachestat_bpf__load(obj);
+ if (ret) {
+ return ret;
+ }
+
+ ebpf_cachestat_adjust_map_size(obj, em);
+
+ ret = (test == EBPF_LOAD_TRAMPOLINE) ? cachestat_bpf__attach(obj) : ebpf_cachestat_attach_probe(obj);
+ if (!ret) {
+ ebpf_cachestat_set_hash_tables(obj);
+
+ ebpf_update_controller(cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd, em);
+ }
+
+ return ret;
+}
+#endif
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@@ -98,6 +340,10 @@ static void ebpf_cachestat_cleanup(void *ptr)
}
bpf_object__close(objects);
}
+#ifdef LIBBPF_MAJOR_VERSION
+ else if (bpf_obj)
+ cachestat_bpf__destroy(bpf_obj);
+#endif
}
/*****************************************************************
@@ -962,6 +1208,54 @@ static void ebpf_cachestat_allocate_global_vectors(int apps)
*****************************************************************/
/**
+ * Update Internal value
+ *
+ * Update values used during runtime.
+ */
+static void ebpf_cachestat_set_internal_value()
+{
+ static char *account_page[] = { "account_page_dirtied", "__set_page_dirty", "__folio_mark_dirty" };
+ if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16)
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_FOLIO_DIRTY];
+ else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15)
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY];
+ else
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY];
+}
+
+/*
+ * Load BPF
+ *
+ * Load BPF files.
+ *
+ * @param em the structure with configuration
+ */
+static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
+{
+ int ret = 0;
+ if (em->load == EBPF_LOAD_LEGACY) {
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
+ if (!probe_links) {
+ ret = -1;
+ }
+ }
+#ifdef LIBBPF_MAJOR_VERSION
+ else {
+ bpf_obj = cachestat_bpf__open();
+ if (!bpf_obj)
+ ret = -1;
+ else
+ ret = ebpf_cachestat_load_and_attach(bpf_obj, em);
+ }
+#endif
+
+ if (ret)
+ error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name);
+
+ return ret;
+}
+
+/**
* Cachestat thread
*
* Thread used to make cachestat thread
@@ -982,17 +1276,17 @@ void *ebpf_cachestat_thread(void *ptr)
if (!em->enabled)
goto endcachestat;
- pthread_mutex_lock(&lock);
- ebpf_cachestat_allocate_global_vectors(em->apps_charts);
+ ebpf_cachestat_set_internal_value();
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
- if (!probe_links) {
- pthread_mutex_unlock(&lock);
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_adjust_thread_load(em, default_btf);
+#endif
+ if (ebpf_cachestat_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
goto endcachestat;
}
- ebpf_update_stats(&plugin_statistics, em);
+ ebpf_cachestat_allocate_global_vectors(em->apps_charts);
int algorithms[NETDATA_CACHESTAT_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
@@ -1002,8 +1296,9 @@ void *ebpf_cachestat_thread(void *ptr)
cachestat_counter_dimension_name, cachestat_counter_dimension_name,
algorithms, NETDATA_CACHESTAT_END);
+ pthread_mutex_lock(&lock);
+ ebpf_update_stats(&plugin_statistics, em);
ebpf_create_memory_charts(em);
-
pthread_mutex_unlock(&lock);
cachestat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 8c56d2417..b386e383c 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -45,6 +45,12 @@ enum cachestat_counters {
NETDATA_CACHESTAT_END
};
+enum cachestat_account_dirty_pages {
+ NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY,
+ NETDATA_CACHESTAT_SET_PAGE_DIRTY,
+ NETDATA_CACHESTAT_FOLIO_DIRTY
+};
+
enum cachestat_indexes {
NETDATA_CACHESTAT_IDX_RATIO,
NETDATA_CACHESTAT_IDX_DIRTY,
@@ -54,7 +60,8 @@ enum cachestat_indexes {
enum cachestat_tables {
NETDATA_CACHESTAT_GLOBAL_STATS,
- NETDATA_CACHESTAT_PID_STATS
+ NETDATA_CACHESTAT_PID_STATS,
+ NETDATA_CACHESTAT_CTRL
};
typedef struct netdata_publish_cachestat_pid {
@@ -78,5 +85,6 @@ extern void *ebpf_cachestat_thread(void *ptr);
extern void clean_cachestat_pid_structures();
extern struct config cachestat_config;
+extern netdata_ebpf_targets_t cachestat_targets[];
#endif // NETDATA_EBPF_CACHESTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index fba87007f..619d8520b 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -49,6 +49,179 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look
.retprobe = CONFIG_BOOLEAN_NO},
{.program_name = NULL}};
+netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "d_lookup", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+
+#ifdef LIBBPF_MAJOR_VERSION
+#include "includes/dc.skel.h" // BTF code
+
+static struct dc_bpf *bpf_obj = NULL;
+
+/**
+ * Disable probe
+ *
+ * Disable all probes to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static inline void ebpf_dc_disable_probes(struct dc_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false);
+ bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false);
+}
+
+/*
+ * Disable trampoline
+ *
+ * Disable all trampoline to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static inline void ebpf_dc_disable_trampoline(struct dc_bpf *obj)
+{
+ bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false);
+}
+
+/**
+ * Set trampoline target
+ *
+ * Set the targets we will monitor.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_dc_set_trampoline_target(struct dc_bpf *obj)
+{
+ bpf_program__set_attach_target(obj->progs.netdata_lookup_fast_fentry, 0,
+ dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name);
+
+ bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0,
+ dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
+}
+
+/**
+ * Mount Attach Probe
+ *
+ * Attach probes to target
+ *
+ * @param obj is the main structure for bpf objects.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_dc_attach_probes(struct dc_bpf *obj)
+{
+ obj->links.netdata_d_lookup_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_d_lookup_kretprobe,
+ true,
+ dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
+ int ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe);
+ if (ret)
+ return -1;
+
+ char *lookup_name = (dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional) ?
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional :
+ dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name ;
+
+ obj->links.netdata_lookup_fast_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_lookup_fast_kprobe,
+ false,
+ lookup_name);
+ ret = libbpf_get_error(obj->links.netdata_lookup_fast_kprobe);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Adjust Map Size
+ *
+ * Resize maps according to input from users.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ */
+static void ebpf_dc_adjust_map_size(struct dc_bpf *obj, ebpf_module_t *em)
+{
+ ebpf_update_map_size(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS],
+ em, bpf_map__name(obj->maps.dcstat_pid));
+}
+
+/**
+ * Set hash tables
+ *
+ * Set the values for maps according to the values given by the kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_dc_set_hash_tables(struct dc_bpf *obj)
+{
+ dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_global);
+ dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_pid);
+ dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd = bpf_map__fd(obj->maps.dcstat_ctrl);
+}
+
+/**
+ * Update Load
+ *
+ * For directory cache, some distributions change the function name, so we cannot unconditionally use
+ * TRAMPOLINE like the other functions.
+ *
+ * @param em structure with configuration
+ *
+ * @return When the symbols were not modified, it returns TRAMPOLINE, else it returns RETPROBE.
+ */
+netdata_ebpf_program_loaded_t ebpf_dc_update_load(ebpf_module_t *em)
+{
+ if (!strcmp(dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional,
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach))
+ return EBPF_LOAD_TRAMPOLINE;
+
+ if (em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode != EBPF_LOAD_RETPROBE)
+ info("When your kernel was compiled the symbol %s was modified, instead to use `trampoline`, the plugin will use `probes`.",
+ dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach);
+
+ return EBPF_LOAD_RETPROBE;
+}
+
+/**
+ * Load and attach
+ *
+ * Load and attach the eBPF code in kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
+{
+ netdata_ebpf_program_loaded_t test = ebpf_dc_update_load(em);
+ if (test == EBPF_LOAD_TRAMPOLINE) {
+ ebpf_dc_disable_probes(obj);
+
+ ebpf_dc_set_trampoline_target(obj);
+ } else {
+ ebpf_dc_disable_trampoline(obj);
+ }
+
+ int ret = dc_bpf__load(obj);
+ if (ret) {
+ return ret;
+ }
+
+ ebpf_dc_adjust_map_size(obj, em);
+
+ ret = (test == EBPF_LOAD_TRAMPOLINE) ? dc_bpf__attach(obj) : ebpf_dc_attach_probes(obj);
+ if (!ret) {
+ ebpf_dc_set_hash_tables(obj);
+
+ ebpf_update_controller(dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd, em);
+ }
+
+ return ret;
+}
+#endif
+
/*****************************************************************
*
* COMMON FUNCTIONS
@@ -141,6 +314,10 @@ static void ebpf_dcstat_cleanup(void *ptr)
}
bpf_object__close(objects);
}
+#ifdef LIBBPF_MAJOR_VERSION
+ else if (bpf_obj)
+ dc_bpf__destroy(bpf_obj);
+#endif
}
/*****************************************************************
@@ -937,6 +1114,38 @@ static void ebpf_dcstat_allocate_global_vectors(int apps)
*
*****************************************************************/
+/**
+ * Load BPF
+ *
+ * Load BPF files.
+ *
+ * @param em the structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
+{
+ int ret = 0;
+ // Legacy mode loads the pre-compiled eBPF object shipped with the plugin.
+ if (em->load == EBPF_LOAD_LEGACY) {
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
+ if (!probe_links) {
+ ret = -1;
+ }
+ }
+#ifdef LIBBPF_MAJOR_VERSION
+ // CO-RE mode uses the libbpf skeleton generated from BTF information.
+ else {
+ bpf_obj = dc_bpf__open();
+ if (!bpf_obj)
+ ret = -1;
+ else
+ ret = ebpf_dc_load_and_attach(bpf_obj, em);
+ }
+#endif
+
+ if (ret)
+ error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name);
+
+ return ret;
+}
+
/**
* Directory Cache thread
*
@@ -960,17 +1169,16 @@ void *ebpf_dcstat_thread(void *ptr)
if (!em->enabled)
goto enddcstat;
- ebpf_dcstat_allocate_global_vectors(em->apps_charts);
-
- pthread_mutex_lock(&lock);
-
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
- if (!probe_links) {
- pthread_mutex_unlock(&lock);
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_adjust_thread_load(em, default_btf);
+#endif
+ if (ebpf_dcstat_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
goto enddcstat;
}
+ ebpf_dcstat_allocate_global_vectors(em->apps_charts);
+
int algorithms[NETDATA_DCSTAT_IDX_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
NETDATA_EBPF_ABSOLUTE_IDX
@@ -980,9 +1188,9 @@ void *ebpf_dcstat_thread(void *ptr)
dcstat_counter_dimension_name, dcstat_counter_dimension_name,
algorithms, NETDATA_DCSTAT_IDX_END);
+ pthread_mutex_lock(&lock);
ebpf_create_filesystem_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
-
pthread_mutex_unlock(&lock);
dcstat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
index c5e6e2bcf..940864737 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -42,7 +42,8 @@ enum directory_cache_indexes {
enum directory_cache_tables {
NETDATA_DCSTAT_GLOBAL_STATS,
- NETDATA_DCSTAT_PID_STATS
+ NETDATA_DCSTAT_PID_STATS,
+ NETDATA_DCSTAT_CTRL
};
// variables
@@ -55,6 +56,11 @@ enum directory_cache_counters {
NETDATA_DIRECTORY_CACHE_END
};
+enum directory_cache_targets {
+ NETDATA_DC_TARGET_LOOKUP_FAST,
+ NETDATA_DC_TARGET_D_LOOKUP
+};
+
typedef struct netdata_publish_dcstat_pid {
uint64_t cache_access;
uint64_t file_system;
@@ -73,5 +79,6 @@ extern void *ebpf_dcstat_thread(void *ptr);
extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr);
extern void clean_dcstat_pid_structures();
extern struct config dcstat_config;
+extern netdata_ebpf_targets_t dc_targets[];
#endif // NETDATA_EBPF_DCSTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index f38801875..463a32904 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -377,6 +377,15 @@ void *ebpf_oomkill_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = oomkill_maps;
+ if (unlikely(!all_pids || !em->apps_charts)) {
+ // When we are not running integration with apps, we won't fill necessary variables for this thread to run, so
+ // we need to disable it.
+ if (em->enabled)
+ info("Disabling OOMKILL thread, because apps integration is completely disabled.");
+
+ em->enabled = 0;
+ }
+
if (!em->enabled) {
goto endoomkill;
}
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index d61bdf66c..f894f0707 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -579,6 +579,9 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_create_apps_charts(struct target *root)
{
+ if (unlikely(!all_pids))
+ return;
+
struct target *w;
int newly_added = 0;
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index da42f0a49..7b2d4a5bf 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -3965,7 +3965,9 @@ void *ebpf_socket_thread(void *ptr)
int algorithms[NETDATA_MAX_SOCKET_VECTOR] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
+ NETDATA_EBPF_INCREMENTAL_IDX
};
ebpf_global_labels(
socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names,
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 906c83da5..7d8423358 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -41,6 +41,154 @@ static struct bpf_object *objects = NULL;
struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1,
NULL, NULL, NULL};
+// Kernel functions monitored by this thread; the mode selects how each one is attached.
+netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE},
+ {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+
+#ifdef LIBBPF_MAJOR_VERSION
+#include "includes/swap.skel.h" // BTF code
+
+static struct swap_bpf *bpf_obj = NULL;
+
+/**
+ * Disable probe
+ *
+ * Disable all probes to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects
+ */
+static void ebpf_swap_disable_probe(struct swap_bpf *obj)
+{
+ // Prevent the kprobe variants from being loaded when trampolines are selected.
+ bpf_program__set_autoload(obj->progs.netdata_swap_readpage_probe, false);
+ bpf_program__set_autoload(obj->progs.netdata_swap_writepage_probe, false);
+}
+
+/**
+ * Disable trampoline
+ *
+ * Disable all trampolines to use exclusively another method.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_swap_disable_trampoline(struct swap_bpf *obj)
+{
+ // Prevent the fentry variants from being loaded when probes are selected.
+ bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false);
+ bpf_program__set_autoload(obj->progs.netdata_swap_writepage_fentry, false);
+}
+
+/**
+ * Set trampoline target
+ *
+ * Set the targets we will monitor, binding each fentry program to its kernel function
+ * from the `swap_targets` table.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_swap_set_trampoline_target(struct swap_bpf *obj)
+{
+ bpf_program__set_attach_target(obj->progs.netdata_swap_readpage_fentry, 0,
+ swap_targets[NETDATA_KEY_SWAP_READPAGE_CALL].name);
+
+ bpf_program__set_attach_target(obj->progs.netdata_swap_writepage_fentry, 0,
+ swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name);
+}
+
+/**
+ * Attach kprobe
+ *
+ * Attach probes to targets.
+ *
+ * @param obj is the main structure for bpf objects.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_swap_attach_kprobe(struct swap_bpf *obj)
+{
+ obj->links.netdata_swap_readpage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_readpage_probe,
+ false,
+ swap_targets[NETDATA_KEY_SWAP_READPAGE_CALL].name);
+ int ret = libbpf_get_error(obj->links.netdata_swap_readpage_probe);
+ if (ret)
+ return -1;
+
+ obj->links.netdata_swap_writepage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_writepage_probe,
+ false,
+ swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name);
+ ret = libbpf_get_error(obj->links.netdata_swap_writepage_probe);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Set hash tables
+ *
+ * Set the values for maps according to the values given by the kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ */
+static void ebpf_swap_set_hash_tables(struct swap_bpf *obj)
+{
+ swap_maps[NETDATA_PID_SWAP_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_pid_swap);
+ swap_maps[NETDATA_SWAP_CONTROLLER].map_fd = bpf_map__fd(obj->maps.swap_ctrl);
+ swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_swap);
+}
+
+/**
+ * Adjust Map Size
+ *
+ * Resize maps according to input from users.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ */
+static void ebpf_swap_adjust_map_size(struct swap_bpf *obj, ebpf_module_t *em)
+{
+ // Only the per-PID table is resizable; global tables keep their compiled-in size.
+ ebpf_update_map_size(obj->maps.tbl_pid_swap, &swap_maps[NETDATA_PID_SWAP_TABLE],
+ em, bpf_map__name(obj->maps.tbl_pid_swap));
+}
+
+/**
+ * Load and attach
+ *
+ * Load and attach the eBPF code in kernel.
+ *
+ * @param obj is the main structure for bpf objects.
+ * @param em structure with configuration
+ *
+ * @return it returns 0 on success and a non-zero value otherwise (error codes from libbpf are propagated).
+ */
+static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t *em)
+{
+ // The attach mode configured for the first target decides between trampolines and probes.
+ netdata_ebpf_targets_t *mt = em->targets;
+ netdata_ebpf_program_loaded_t test = mt[NETDATA_KEY_SWAP_READPAGE_CALL].mode;
+
+ if (test == EBPF_LOAD_TRAMPOLINE) {
+ ebpf_swap_disable_probe(obj);
+
+ ebpf_swap_set_trampoline_target(obj);
+ } else {
+ ebpf_swap_disable_trampoline(obj);
+ }
+
+ int ret = swap_bpf__load(obj);
+ if (ret) {
+ return ret;
+ }
+
+ // Map sizes must be adjusted after load but before the file descriptors are cached.
+ ebpf_swap_adjust_map_size(obj, em);
+
+ ret = (test == EBPF_LOAD_TRAMPOLINE) ? swap_bpf__attach(obj) : ebpf_swap_attach_kprobe(obj);
+ if (!ret) {
+ ebpf_swap_set_hash_tables(obj);
+
+ ebpf_update_controller(swap_maps[NETDATA_SWAP_CONTROLLER].map_fd, em);
+ }
+
+ return ret;
+}
+#endif
+
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@@ -92,6 +240,10 @@ static void ebpf_swap_cleanup(void *ptr)
}
bpf_object__close(objects);
}
+#ifdef LIBBPF_MAJOR_VERSION
+ else if (bpf_obj)
+ swap_bpf__destroy(bpf_obj);
+#endif
}
/*****************************************************************
@@ -654,6 +806,38 @@ static void ebpf_create_swap_charts(int update_every)
update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
}
+/**
+ * Load BPF
+ *
+ * Load BPF files.
+ *
+ * @param em the structure with configuration
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+static int ebpf_swap_load_bpf(ebpf_module_t *em)
+{
+ int ret = 0;
+ // Legacy mode loads the pre-compiled eBPF object shipped with the plugin.
+ if (em->load == EBPF_LOAD_LEGACY) {
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
+ if (!probe_links) {
+ ret = -1;
+ }
+ }
+#ifdef LIBBPF_MAJOR_VERSION
+ // CO-RE mode uses the libbpf skeleton generated from BTF information.
+ else {
+ bpf_obj = swap_bpf__open();
+ if (!bpf_obj)
+ ret = -1;
+ else
+ ret = ebpf_swap_load_and_attach(bpf_obj, em);
+ }
+#endif
+
+ if (ret)
+ error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name);
+
+ return ret;
+}
+
/**
* SWAP thread
*
@@ -675,8 +859,10 @@ void *ebpf_swap_thread(void *ptr)
if (!em->enabled)
goto endswap;
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
- if (!probe_links) {
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_adjust_thread_load(em, default_btf);
+#endif
+ if (ebpf_swap_load_bpf(em)) {
em->enabled = CONFIG_BOOLEAN_NO;
goto endswap;
}
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h
index 1dba9c17a..31bda16a2 100644
--- a/collectors/ebpf.plugin/ebpf_swap.h
+++ b/collectors/ebpf.plugin/ebpf_swap.h
@@ -49,5 +49,6 @@ extern void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr);
extern void clean_swap_pid_structures();
extern struct config swap_config;
+extern netdata_ebpf_targets_t swap_targets[];
#endif
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index 233c34a5b..b45ec86c1 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -44,13 +44,41 @@ struct config sync_config = { .first_section = NULL,
.rwlock = AVL_LOCK_INITIALIZER } };
ebpf_sync_syscalls_t local_syscalls[] = {
- {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL},
- {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL}
+ {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ },
+ {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+ .sync_obj = NULL
+#endif
+ }
};
netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE},
@@ -228,7 +256,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
{
int i;
const char *saved_name = em->thread_name;
- sync_syscalls_index_t errors = 0;
+ int errors = 0;
for (i = 0; local_syscalls[i].syscall; i++) {
ebpf_sync_syscalls_t *w = &local_syscalls[i];
if (w->enabled) {
@@ -246,12 +274,15 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
if (!w->sync_obj) {
errors++;
} else {
- if (ebpf_sync_load_and_attach(w->sync_obj, em, syscall, i)) {
+ if (ebpf_is_function_inside_btf(default_btf, syscall)) {
+ if (ebpf_sync_load_and_attach(w->sync_obj, em, syscall, i)) {
+ errors++;
+ }
+ } else {
if (ebpf_sync_load_legacy(w, em))
errors++;
-
- em->thread_name = saved_name;
}
+ em->thread_name = saved_name;
}
}
#endif
@@ -263,7 +294,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
memset(sync_counter_publish_aggregated, 0 , NETDATA_SYNC_IDX_END * sizeof(netdata_publish_syscall_t));
memset(sync_hash_values, 0 , NETDATA_SYNC_IDX_END * sizeof(netdata_idx_t));
- return 0;
+ return (errors) ? -1 : 0;
}
/*****************************************************************