Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/README.md           |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf.c              | 163
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf         |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf.h              |  13
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c    |   8
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h    |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c       |   8
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c         |  11
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c           |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c   |  30
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.h   |   1
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c      |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c      |  14
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c        |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c      |  10
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c      | 111
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h      |  15
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c          |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c       |   9
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c      |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c         |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c         |   8
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.h         |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c          |   7
24 files changed, 364 insertions(+), 94 deletions(-)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 60f1fd742..8fc6809e2 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -605,7 +605,7 @@ The eBPF collector enables and runs the following eBPF programs by default:
- `fd` : This eBPF program creates charts that show information about calls to open files.
- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
-- `sync`: Montitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
bandwidth consumed by each.
- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 71a13e84f..d51d378bd 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -6,47 +6,10 @@
#include "ebpf.h"
#include "ebpf_socket.h"
+#include "libnetdata/required_dummies.h"
/*****************************************************************
*
- * FUNCTIONS USED BY NETDATA
- *
- *****************************************************************/
-
-// callback required by eval()
-int health_variable_lookup(const char *variable, uint32_t hash, struct rrdcalc *rc, calculated_number *result)
-{
- UNUSED(variable);
- UNUSED(hash);
- UNUSED(rc);
- UNUSED(result);
- return 0;
-};
-
-void send_statistics(const char *action, const char *action_result, const char *action_data)
-{
- UNUSED(action);
- UNUSED(action_result);
- UNUSED(action_data);
-}
-
-// callbacks required by popen()
-void signals_block(void){};
-void signals_unblock(void){};
-void signals_reset(void){};
-
-// required by get_system_cpus()
-char *netdata_configured_host_prefix = "";
-
-// callback required by fatal()
-void netdata_cleanup_and_exit(int ret)
-{
- exit(ret);
-}
-
-// ----------------------------------------------------------------------
-/*****************************************************************
- *
* GLOBAL VARIABLES
*
*****************************************************************/
@@ -64,7 +27,6 @@ struct config collector_config = { .first_section = NULL,
.rwlock = AVL_LOCK_INITIALIZER } };
int running_on_kernel = 0;
-char kernel_string[64];
int ebpf_nprocs;
int isrh = 0;
uint32_t finalized_threads = 1;
@@ -79,94 +41,127 @@ ebpf_module_t ebpf_modules[] = {
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
- .config_file = NETDATA_PROCESS_CONFIG_FILE},
+ .config_file = NETDATA_PROCESS_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_socket_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
- .config_file = NETDATA_NETWORK_CONFIG_FILE},
+ .config_file = NETDATA_NETWORK_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config,
- .config_file = NETDATA_CACHESTAT_CONFIG_FILE},
+ .config_file = NETDATA_CACHESTAT_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_15,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE},
+ .config_file = NETDATA_SYNC_CONFIG_FILE,
+ // All syscalls have the same kernels
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
- .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE},
+ .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
- .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE},
+ .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
- .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
+ .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
- .config_file = NETDATA_FILESYSTEM_CONFIG_FILE},
+ .config_file = NETDATA_FILESYSTEM_CONFIG_FILE,
+ // We set kernels to zero, because we load eBPF programs according to the running kernel.
+ .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL },
{ .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
- .config_file = NETDATA_DISK_CONFIG_FILE},
+ .config_file = NETDATA_DISK_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
- .config_file = NETDATA_MOUNT_CONFIG_FILE},
+ .config_file = NETDATA_MOUNT_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_fd_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
- .config_file = NETDATA_FD_CONFIG_FILE},
+ .config_file = NETDATA_FD_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
- .config_file = NETDATA_HARDIRQ_CONFIG_FILE},
+ .config_file = NETDATA_HARDIRQ_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
- .config_file = NETDATA_SOFTIRQ_CONFIG_FILE},
+ .config_file = NETDATA_SOFTIRQ_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
- .config_file = NETDATA_OOMKILL_CONFIG_FILE},
+ .config_file = NETDATA_OOMKILL_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = ebpf_shm_create_apps_charts, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
- .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE},
+ .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
{ .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
- .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE},
- { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
+ .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE,
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .load = EBPF_LOAD_LEGACY, .targets = NULL},
+ { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
.global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .cgroup_charts = CONFIG_BOOLEAN_NO,
.mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
- .cfg = NULL, .config_name = NULL},
+ .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL},
};
// Link with apps.plugin
@@ -181,6 +176,10 @@ pthread_mutex_t mutex_cgroup_shm;
//Network viewer
ebpf_network_viewer_options_t network_viewer_opt;
+// Statistic
+ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0,
+ .probes = 0, .retprobes = 0, .trampolines = 0};
+
/*****************************************************************
*
* FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
@@ -764,6 +763,20 @@ static inline void ebpf_disable_cgroups()
}
/**
+ * Update Disabled Plugins
+ *
+ * This function calls ebpf_update_stats to update statistics for collector.
+ *
+ * @param em a pointer to `struct ebpf_module`
+ */
+void ebpf_update_disabled_plugin_stats(ebpf_module_t *em)
+{
+ pthread_mutex_lock(&lock);
+ ebpf_update_stats(&plugin_statistics, em);
+ pthread_mutex_unlock(&lock);
+}
+
+/**
* Print help on standard error for user knows how to use the collector.
*/
void ebpf_print_help()
@@ -829,6 +842,10 @@ void ebpf_print_help()
" [-]-swap Enable chart related to swap run time.\n"
"\n"
" [-]-vfs Enable chart related to vfs run time.\n"
+ "\n"
+ " [-]-legacy Load legacy eBPF programs.\n"
+ "\n"
+ " [-]-core Use CO-RE when available (work in progress).\n"
"\n",
VERSION,
(year >= 116) ? year + 1900 : 2020);
@@ -1339,7 +1356,6 @@ void set_global_variables()
isrh = get_redhat_release();
pid_max = get_system_pid_max();
running_on_kernel = ebpf_get_kernel_version();
- ebpf_update_kernel(kernel_string, 63, isrh, running_on_kernel);
}
/**
@@ -1356,6 +1372,19 @@ static inline void ebpf_load_thread_config()
}
/**
+ * Set Load mode
+ *
+ * @param load default load mode.
+ */
+static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load)
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].load = load;
+ }
+}
+
+/**
* Parse arguments given from user.
*
* @param argc the number of arguments
@@ -1391,6 +1420,8 @@ static void ebpf_parse_args(int argc, char **argv)
{"help", no_argument, 0, 0 },
{"global", no_argument, 0, 0 },
{"return", no_argument, 0, 0 },
+ {"legacy", no_argument, 0, 0 },
+ {"core", no_argument, 0, 0 },
{0, 0, 0, 0}
};
@@ -1567,6 +1598,20 @@ static void ebpf_parse_args(int argc, char **argv)
#endif
break;
}
+ case EBPF_OPTION_LEGACY: {
+ ebpf_set_load_mode(EBPF_LOAD_LEGACY);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with \"LEGACY\" code, because it was started with the option \"[-]-legacy\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_CORE: {
+ ebpf_set_load_mode(EBPF_LOAD_CORE);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with \"CO-RE\" code, because it was started with the option \"[-]-core\".");
+#endif
+ break;
+ }
default: {
break;
}
@@ -1579,8 +1624,6 @@ static void ebpf_parse_args(int argc, char **argv)
if (disable_cgroups)
ebpf_disable_cgroups();
-
- ebpf_enable_all_charts(disable_apps, disable_cgroups);
}
if (select_threads) {
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index 845b711c9..633c53791 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -41,7 +41,7 @@
# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
# bandwidth consumed by each.
# `softirq` : Monitor latency of serving software interrupt requests (soft IRQs).
-# `sync` : Montitor calls for syscall sync(2).
+# `sync` : Monitor calls for syscall sync(2).
# `swap` : Monitor calls for internal swap functions.
# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
# files removed.
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index a59bad031..165a6bf36 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -27,6 +27,7 @@
#include "libnetdata/config/appconfig.h"
#include "libnetdata/ebpf/ebpf.h"
#include "libnetdata/procfile/procfile.h"
+#include "collectors/cgroups.plugin/sys_fs_cgroup.h"
#include "daemon/main.h"
#include "ebpf_apps.h"
@@ -96,7 +97,9 @@ enum ebpf_main_index {
EBPF_OPTION_VERSION,
EBPF_OPTION_HELP,
EBPF_OPTION_GLOBAL_CHART,
- EBPF_OPTION_RETURN_MODE
+ EBPF_OPTION_RETURN_MODE,
+ EBPF_OPTION_LEGACY,
+ EBPF_OPTION_CORE
};
typedef struct ebpf_tracepoint {
@@ -126,6 +129,11 @@ typedef struct ebpf_tracepoint {
#define NETDATA_SYSTEM_SWAP_SUBMENU "swap"
#define NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU "swap (eBPF)"
#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory"
+#define NETDATA_MONITORING_FAMILY "netdata"
+
+// Statistics charts
+#define NETDATA_EBPF_THREADS "ebpf_threads"
+#define NETDATA_EBPF_LOAD_METHOD "ebpf_load_methods"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -159,7 +167,6 @@ extern int ebpf_nprocs;
extern int running_on_kernel;
extern int isrh;
extern char *ebpf_plugin_dir;
-extern char kernel_string[64];
extern pthread_mutex_t collect_data_mutex;
extern pthread_cond_t collect_data_cond_var;
@@ -256,6 +263,7 @@ extern sem_t *shm_sem_ebpf_cgroup;
extern pthread_mutex_t mutex_cgroup_shm;
extern size_t all_pids_count;
extern uint32_t finalized_threads;
+extern ebpf_plugin_stats_t plugin_statistics;
// Socket functions and variables
// Common functions
@@ -268,6 +276,7 @@ extern void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em);
extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
char *charttype, char *context, int order, int update_every);
extern void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
+void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
#define EBPF_MAX_SYNCHRONIZATION_TIME 300
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index 7ba8c01ae..0cc16234b 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -985,12 +985,15 @@ void *ebpf_cachestat_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_cachestat_allocate_global_vectors(em->apps_charts);
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endcachestat;
}
+ ebpf_update_stats(&plugin_statistics, em);
+
int algorithms[NETDATA_CACHESTAT_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
};
@@ -1006,6 +1009,9 @@ void *ebpf_cachestat_thread(void *ptr)
cachestat_collector(em);
endcachestat:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 7904c8113..8c56d2417 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -52,7 +52,7 @@ enum cachestat_indexes {
NETDATA_CACHESTAT_IDX_MISS
};
-enum cachesta_tables {
+enum cachestat_tables {
NETDATA_CACHESTAT_GLOBAL_STATS,
NETDATA_CACHESTAT_PID_STATS
};
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 7ae821889..820e24e39 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -964,9 +964,10 @@ void *ebpf_dcstat_thread(void *ptr)
pthread_mutex_lock(&lock);
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
+ em->enabled = CONFIG_BOOLEAN_NO;
goto enddcstat;
}
@@ -980,11 +981,16 @@ void *ebpf_dcstat_thread(void *ptr)
algorithms, NETDATA_DCSTAT_IDX_END);
ebpf_create_filesystem_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
+
pthread_mutex_unlock(&lock);
dcstat_collector(em);
enddcstat:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
index 6e139ec9f..3ddf50b93 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -817,12 +817,14 @@ void *ebpf_disk_thread(void *ptr)
}
if (pthread_mutex_init(&plot_mutex, NULL)) {
+ em->enabled = 0;
error("Cannot initialize local mutex");
goto enddisk;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = 0;
goto enddisk;
}
@@ -833,9 +835,16 @@ void *ebpf_disk_thread(void *ptr)
ebpf_global_labels(disk_aggregated_data, disk_publish_aggregated, dimensions, dimensions, algorithms,
NETDATA_EBPF_HIST_MAX_BINS);
+ pthread_mutex_lock(&lock);
+ ebpf_update_stats(&plugin_statistics, em);
+ pthread_mutex_unlock(&lock);
+
disk_collector(em);
enddisk:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 6eecf5847..10a50c4eb 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -841,8 +841,9 @@ void *ebpf_fd_thread(void *ptr)
ebpf_fd_allocate_global_vectors(em->apps_charts);
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endfd;
}
@@ -855,11 +856,15 @@ void *ebpf_fd_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_fd_global_charts(em);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
fd_collector(em);
endfd:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
index ad2c9eff0..415a42dbc 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -38,7 +38,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0}},
+ .addresses = {.function = NULL, .addr = 0},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
{.filesystem = "xfs",
.optional_filesystem = NULL,
.family = "xfs",
@@ -46,7 +47,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0}},
+ .addresses = {.function = NULL, .addr = 0},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
{.filesystem = "nfs",
.optional_filesystem = "nfs4",
.family = "nfs",
@@ -54,7 +56,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_ATTR_CHARTS,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0}},
+ .addresses = {.function = NULL, .addr = 0},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
{.filesystem = "zfs",
.optional_filesystem = NULL,
.family = "zfs",
@@ -62,7 +65,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0}},
+ .addresses = {.function = NULL, .addr = 0},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
{.filesystem = "btrfs",
.optional_filesystem = NULL,
.family = "btrfs",
@@ -70,7 +74,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = "btrfs_file_operations", .addr = 0}},
+ .addresses = {.function = "btrfs_file_operations", .addr = 0},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10},
{.filesystem = NULL,
.optional_filesystem = NULL,
.family = NULL,
@@ -78,7 +83,8 @@ ebpf_filesystem_partitions_t localfs[] =
.probe_links = NULL,
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0}}};
+ .addresses = {.function = NULL, .addr = 0},
+ .kernels = 0}};
struct netdata_static_thread filesystem_threads = {"EBPF FS READ",
NULL, NULL, 1, NULL,
@@ -224,13 +230,16 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
{
int i;
const char *saved_name = em->thread_name;
+ uint64_t kernels = em->kernels;
for (i = 0; localfs[i].filesystem; i++) {
ebpf_filesystem_partitions_t *efp = &localfs[i];
if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) {
em->thread_name = efp->filesystem;
- efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &efp->objects);
+ em->kernels = efp->kernels;
+ efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &efp->objects);
if (!efp->probe_links) {
em->thread_name = saved_name;
+ em->kernels = kernels;
return -1;
}
efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
@@ -243,6 +252,7 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
}
em->thread_name = saved_name;
+ em->kernels = kernels;
if (!dimensions) {
dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
@@ -640,7 +650,7 @@ void *ebpf_filesystem_thread(void *ptr)
if (em->optional)
info("Netdata cannot monitor the filesystems used on this host.");
- em->enabled = 0;
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endfilesystem;
}
@@ -651,11 +661,15 @@ void *ebpf_filesystem_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_fs_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
filesystem_collector(em);
endfilesystem:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h
index 295eec205..8b7c54c58 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.h
+++ b/collectors/ebpf.plugin/ebpf_filesystem.h
@@ -60,6 +60,7 @@ typedef struct ebpf_filesystem_partitions {
uint32_t enabled;
ebpf_addresses_t addresses;
+ uint64_t kernels;
} ebpf_filesystem_partitions_t;
extern void *ebpf_filesystem_thread(void *ptr);
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
index ff649e9cd..25b2a0ec6 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -428,6 +428,7 @@ static void hardirq_collector(ebpf_module_t *em)
pthread_mutex_lock(&lock);
hardirq_create_charts(em->update_every);
hardirq_create_static_dims();
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -480,14 +481,18 @@ void *ebpf_hardirq_thread(void *ptr)
goto endhardirq;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endhardirq;
}
hardirq_collector(em);
endhardirq:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
index e2420ec8e..9f75543d7 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -256,6 +256,7 @@ static void mdflush_collector(ebpf_module_t *em)
// create chart and static dims.
pthread_mutex_lock(&lock);
mdflush_create_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -294,18 +295,29 @@ void *ebpf_mdflush_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = mdflush_maps;
+ char *md_flush_request = ebpf_find_symbol("md_flush_request");
+ if (!md_flush_request) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ error("Cannot monitor MD devices, because md is not loaded.");
+ }
+ freez(md_flush_request);
+
if (!em->enabled) {
goto endmdflush;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endmdflush;
}
mdflush_collector(em);
endmdflush:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
index 46f323471..666936d86 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -238,8 +238,9 @@ void *ebpf_mount_thread(void *ptr)
if (!em->enabled)
goto endmount;
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endmount;
}
@@ -250,11 +251,15 @@ void *ebpf_mount_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_mount_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
mount_collector(em);
endmount:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 7f7df36f9..434fc7fd3 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -386,14 +386,22 @@ void *ebpf_oomkill_thread(void *ptr)
goto endoomkill;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endoomkill;
}
+ pthread_mutex_lock(&lock);
+ ebpf_update_stats(&plugin_statistics, em);
+ pthread_mutex_unlock(&lock);
+
oomkill_collector(em);
endoomkill:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index a4a6709e8..d61bdf66c 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -58,6 +58,10 @@ struct config process_config = { .first_section = NULL,
static struct netdata_static_thread cgroup_thread = {"EBPF CGROUP", NULL, NULL,
1, NULL, NULL, NULL};
+
+static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"};
+static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
+
/*****************************************************************
*
* PROCESS DATA AND SEND TO NETDATA
@@ -435,6 +439,78 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
}
/**
+ * Create chart for Statistic Thread
+ *
+ * Write to standard output current values for threads.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_thread_chart(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_THREADS,
+ "Threads info.",
+ "threads",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140000,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_TOTAL],
+ threads_stat[NETDATA_EBPF_THREAD_STAT_TOTAL],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_global_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_RUNNING],
+ threads_stat[NETDATA_EBPF_THREAD_STAT_RUNNING],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
+ * Create chart for Load Thread
+ *
+ * Write to standard output current values for load mode.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_load_chart(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_LOAD_METHOD,
+ "Load info.",
+ "methods",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140001,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
+ load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
+ load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
+ * Create Statistics Charts
+ *
+ * Create charts that will show statistics related to eBPF plugin.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_statistic_charts(ebpf_module_t *em)
+{
+ ebpf_create_statistic_thread_chart(em);
+
+ ebpf_create_statistic_load_chart(em);
+}
+
+/**
* Create process apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
@@ -913,6 +989,24 @@ void ebpf_process_update_cgroup_algorithm()
}
/**
+ * Send Statistic Data
+ *
+ * Send statistic information to netdata.
+ */
+void ebpf_send_statistic_data()
+{
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS);
+ write_chart_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_TOTAL], (long long)plugin_statistics.threads);
+ write_chart_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_RUNNING], (long long)plugin_statistics.running);
+ write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD);
+ write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY], (long long)plugin_statistics.legacy);
+ write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE], (long long)plugin_statistics.core);
+ write_end_chart();
+}
+
+/**
* Main loop for this collector.
*
* @param em the structure with thread information
@@ -967,8 +1061,10 @@ static void process_collector(ebpf_module_t *em)
}
}
+ pthread_mutex_lock(&lock);
+ ebpf_send_statistic_data();
+
if (thread_enabled) {
- pthread_mutex_lock(&lock);
if (publish_global) {
ebpf_process_send_data(em);
}
@@ -980,12 +1076,10 @@ static void process_collector(ebpf_module_t *em)
if (cgroups) {
ebpf_process_send_cgroup_data(em);
}
- pthread_mutex_unlock(&lock);
}
+ pthread_mutex_unlock(&lock);
}
- pthread_mutex_unlock(&lock);
-
fflush(stdout);
}
}
@@ -1212,8 +1306,9 @@ void *ebpf_process_thread(void *ptr)
ebpf_update_pid_table(&process_maps[0], em);
set_local_pointers();
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endprocess;
}
@@ -1230,11 +1325,17 @@ void *ebpf_process_thread(void *ptr)
ebpf_create_global_charts(em);
}
+ ebpf_update_stats(&plugin_statistics, em);
+ ebpf_create_statistic_charts(em);
+
pthread_mutex_unlock(&lock);
process_collector(em);
endprocess:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
wait_for_all_threads_die();
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 73421049e..b0377b5db 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -39,6 +39,21 @@
#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit"
#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error"
+// Statistical information
+enum netdata_ebpf_thread_stats{
+ NETDATA_EBPF_THREAD_STAT_TOTAL,
+ NETDATA_EBPF_THREAD_STAT_RUNNING,
+
+ NETDATA_EBPF_THREAD_STAT_END
+};
+
+enum netdata_ebpf_load_mode_stats{
+ NETDATA_EBPF_LOAD_STAT_LEGACY,
+ NETDATA_EBPF_LOAD_STAT_CORE,
+
+ NETDATA_EBPF_LOAD_STAT_END
+};
+
// Index from kernel
typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_DO_EXIT,
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index 156ae9aa5..0cfb3abdc 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -821,8 +821,9 @@ void *ebpf_shm_thread(void *ptr)
goto endshm;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endshm;
}
@@ -845,11 +846,15 @@ void *ebpf_shm_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_shm_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
shm_collector(em);
endshm:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index f7710ff22..b59367f58 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -3363,6 +3363,7 @@ void *ebpf_socket_thread(void *ptr)
goto endsocket;
if (pthread_mutex_init(&nv_mutex, NULL)) {
+ em->enabled = CONFIG_BOOLEAN_NO;
error("Cannot initialize local mutex");
goto endsocket;
}
@@ -3374,8 +3375,9 @@ void *ebpf_socket_thread(void *ptr)
if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0)
em->mode = MODE_ENTRY;
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endsocket;
}
@@ -3390,12 +3392,17 @@ void *ebpf_socket_thread(void *ptr)
ebpf_create_global_charts(em);
+ ebpf_update_stats(&plugin_statistics, em);
+
finalized_threads = 0;
pthread_mutex_unlock(&lock);
socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em);
endsocket:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
index 119c1222a..f5e79279f 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -209,6 +209,7 @@ static void softirq_collector(ebpf_module_t *em)
pthread_mutex_lock(&lock);
softirq_create_charts(em->update_every);
softirq_create_dims();
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -259,14 +260,18 @@ void *ebpf_softirq_thread(void *ptr)
goto endsoftirq;
}
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endsoftirq;
}
softirq_collector(em);
endsoftirq:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 34750c79d..82eb9db88 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -675,8 +675,9 @@ void *ebpf_swap_thread(void *ptr)
if (!em->enabled)
goto endswap;
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endswap;
}
@@ -688,11 +689,15 @@ void *ebpf_swap_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_swap_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
swap_collector(em);
endswap:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index 4bd62bcae..b3f1b6524 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -74,7 +74,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
ebpf_sync_syscalls_t *w = &local_syscalls[i];
if (!w->probe_links && w->enabled) {
em->thread_name = w->syscall;
- w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &w->objects);
+ w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &w->objects);
if (!w->probe_links) {
em->thread_name = saved_name;
return -1;
@@ -387,7 +387,7 @@ void *ebpf_sync_thread(void *ptr)
goto endsync;
if (ebpf_sync_initialize_syscall(em)) {
- pthread_mutex_unlock(&lock);
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endsync;
}
@@ -400,11 +400,15 @@ void *ebpf_sync_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_sync_charts(em->update_every);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
sync_collector(em);
endsync:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h
index 1f811d341..e40c77a3f 100644
--- a/collectors/ebpf.plugin/ebpf_sync.h
+++ b/collectors/ebpf.plugin/ebpf_sync.h
@@ -47,7 +47,7 @@ enum netdata_sync_charts {
};
enum netdata_sync_table {
- NETDATA_SYNC_GLOBLAL_TABLE
+ NETDATA_SYNC_GLOBAL_TABLE
};
extern void *ebpf_sync_thread(void *ptr);
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index 060469ec5..d1c418f85 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -1576,8 +1576,9 @@ void *ebpf_vfs_thread(void *ptr)
if (!em->enabled)
goto endvfs;
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects);
if (!probe_links) {
+ em->enabled = CONFIG_BOOLEAN_NO;
goto endvfs;
}
@@ -1591,11 +1592,15 @@ void *ebpf_vfs_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_global_charts(em);
+ ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
vfs_collector(em);
endvfs:
+ if (!em->enabled)
+ ebpf_update_disabled_plugin_stats(em);
+
netdata_thread_cleanup_pop(1);
return NULL;
}