summaryrefslogtreecommitdiffstats
path: root/collectors/ebpf.plugin/ebpf.c
diff options
context:
space:
mode:
Diffstat (limited to 'collectors/ebpf.plugin/ebpf.c')
-rw-r--r--collectors/ebpf.plugin/ebpf.c1002
1 files changed, 818 insertions, 184 deletions
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 5cc005f30..71a13e84f 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -55,7 +55,6 @@ char *ebpf_plugin_dir = PLUGINS_DIR;
static char *ebpf_configured_log_dir = LOG_DIR;
char *ebpf_algorithms[] = {"absolute", "incremental"};
-int update_every = 1;
static int thread_finished = 0;
int close_ebpf_plugin = 0;
struct config collector_config = { .first_section = NULL,
@@ -67,7 +66,7 @@ struct config collector_config = { .first_section = NULL,
int running_on_kernel = 0;
char kernel_string[64];
int ebpf_nprocs;
-static int isrh;
+int isrh = 0;
uint32_t finalized_threads = 1;
pthread_mutex_t lock;
@@ -76,32 +75,109 @@ pthread_cond_t collect_data_cond_var;
ebpf_module_t ebpf_modules[] = {
{ .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
+ .config_file = NETDATA_PROCESS_CONFIG_FILE},
{ .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
+ .config_file = NETDATA_NETWORK_CONFIG_FILE},
{ .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config,
+ .config_file = NETDATA_CACHESTAT_CONFIG_FILE},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL },
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config,
+ .config_file = NETDATA_SYNC_CONFIG_FILE},
{ .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE },
- { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
- .global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL },
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
+ .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE},
+ { .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
+ .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE},
+ { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
+ .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
+ { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
+ .config_file = NETDATA_FILESYSTEM_CONFIG_FILE},
+ { .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
+ .config_file = NETDATA_DISK_CONFIG_FILE},
+ { .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
+ .config_file = NETDATA_MOUNT_CONFIG_FILE},
+ { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
+ .config_file = NETDATA_FD_CONFIG_FILE},
+ { .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
+ .config_file = NETDATA_HARDIRQ_CONFIG_FILE},
+ { .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
+ .config_file = NETDATA_SOFTIRQ_CONFIG_FILE},
+ { .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
+ .config_file = NETDATA_OOMKILL_CONFIG_FILE},
+ { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
+ .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE},
+ { .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
+ .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE},
+ { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
+ .global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .cgroup_charts = CONFIG_BOOLEAN_NO,
+ .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
+ .cfg = NULL, .config_name = NULL},
};
// Link with apps.plugin
ebpf_process_stat_t *global_process_stat = NULL;
+// Link with cgroup.plugin
+netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
+int shm_fd_ebpf_cgroup = -1;
+sem_t *shm_sem_ebpf_cgroup = SEM_FAILED;
+pthread_mutex_t mutex_cgroup_shm;
+
//Network viewer
ebpf_network_viewer_options_t network_viewer_opt;
@@ -155,6 +231,33 @@ static void ebpf_exit(int sig)
freez(dcstat_pid);
}
+ if (ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled = 0;
+ clean_swap_pid_structures();
+ freez(swap_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0;
+ clean_vfs_pid_structures();
+ freez(vfs_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_FD_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_FD_IDX].enabled = 0;
+ clean_fd_pid_structures();
+ freez(fd_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_SHM_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_SHM_IDX].enabled = 0;
+ clean_shm_pid_structures();
+ freez(shm_pid);
+ }
+
+ ebpf_close_cgroup_shm();
+
+ ebpf_clean_cgroup_pids();
/*
int ret = fork();
if (ret < 0) // error
@@ -241,8 +344,7 @@ inline void write_end_chart()
*/
void write_chart_dimension(char *dim, long long value)
{
- int ret = printf("SET %s = %lld\n", dim, value);
- UNUSED(ret);
+ printf("SET %s = %lld\n", dim, value);
}
/**
@@ -253,7 +355,7 @@ void write_chart_dimension(char *dim, long long value)
* @param move the pointer with the values that will be published
* @param end the number of values that will be written on standard output
*
- * @return It returns a variable tha maps the charts that did not have zero values.
+ * @return It returns a variable that maps the charts that did not have zero values.
*/
void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
{
@@ -322,7 +424,7 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long
* @param dread the dimension name
* @param vread the value for previous dimension
*
- * @return It returns a variable tha maps the charts that did not have zero values.
+ * @return It returns a variable that maps the charts that did not have zero values.
*/
void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
{
@@ -337,6 +439,36 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c
/**
* Write chart cmd on standard output
*
+ * @param type chart type
+ * @param id chart id
+ * @param title chart title
+ * @param units units label
+ * @param family group name used to attach the chart on dashboard
+ * @param charttype chart type
+ * @param context chart context
+ * @param order chart order
+ * @param update_every update interval used by plugin
+ * @param module chart module name, this is the eBPF thread.
+ */
+void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order, int update_every, char *module)
+{
+ printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n",
+ type,
+ id,
+ title,
+ units,
+ (family)?family:"",
+ (context)?context:"",
+ (charttype)?charttype:"",
+ order,
+ update_every,
+ module);
+}
+
+/**
+ * Write chart cmd on standard output
+ *
* @param type chart type
* @param id chart id
* @param title chart title
@@ -345,11 +477,12 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c
* @param charttype chart type
* @param context chart context
* @param order chart order
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family,
- char *charttype, char *context, int order)
+void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order, int update_every)
{
- printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d\n",
+ printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n",
type,
id,
title,
@@ -395,17 +528,19 @@ void ebpf_create_global_dimension(void *ptr, int end)
/**
* Call write_chart_cmd to create the charts
*
- * @param type chart type
- * @param id chart id
- * @param title chart title
- * @param units axis label
- * @param family group name used to attach the chart on dashboard
- * @param context chart context
- * @param charttype chart type
- * @param order order number of the specified chart
- * @param ncd a pointer to a function called to create dimensions
- * @param move a pointer for a structure that has the dimensions
- * @param end number of dimensions for the chart created
+ * @param type chart type
+ * @param id chart id
+ * @param title chart title
+ * @param units axis label
+ * @param family group name used to attach the chart on dashboard
+ * @param context chart context
+ * @param charttype chart type
+ * @param order order number of the specified chart
+ * @param ncd a pointer to a function called to create dimensions
+ * @param move a pointer for a structure that has the dimensions
+ * @param end number of dimensions for the chart created
+ * @param update_every update interval used with chart.
+ * @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_chart(char *type,
char *id,
@@ -417,11 +552,15 @@ void ebpf_create_chart(char *type,
int order,
void (*ncd)(void *, int),
void *move,
- int end)
+ int end,
+ int update_every,
+ char *module)
{
- ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order);
+ ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order, update_every, module);
- ncd(move, end);
+ if (ncd) {
+ ncd(move, end);
+ }
}
/**
@@ -435,12 +574,15 @@ void ebpf_create_chart(char *type,
* @param order the chart order
* @param algorithm the algorithm used by dimension
* @param root structure used to create the dimensions.
+ * @param update_every update interval used by plugin
+ * @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, struct target *root)
+ char *algorithm, struct target *root, int update_every, char *module)
{
struct target *w;
- ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order);
+ ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order,
+ update_every, module);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed))
@@ -448,6 +590,31 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family
}
}
+/**
+ * Call the necessary functions to create a name.
+ *
+ * @param family family name
+ * @param name chart name
+ * @param hist0 histogram values
+ * @param dimensions dimension values.
+ * @param end number of bins that will be sent to Netdata.
+ *
+ * The histogram is written to standard output; the function returns nothing.
+ */
+void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
+{
+ write_begin_chart(family, name);
+
+ uint32_t i;
+ for (i = 0; i < end; i++) {
+ write_chart_dimension(dimensions[i], (long long) hist[i]);
+ }
+
+ write_end_chart();
+
+ fflush(stdout);
+}
+
/*****************************************************************
*
* FUNCTIONS TO DEFINE OPTIONS
@@ -503,43 +670,68 @@ static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
/**
* Enable specific charts selected by user.
*
- * @param em the structure that will be changed
- * @param enable the status about the apps charts.
+ * @param em the structure that will be changed
+ * @param disable_apps the status about the apps charts.
+ * @param disable_cgroup the status about the cgroups charts.
*/
-static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int enable)
+static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_apps, int disable_cgroup)
{
- em->enabled = 1;
- if (!enable) {
- em->apps_charts = 1;
+ em->enabled = CONFIG_BOOLEAN_YES;
+
+    // oomkill stores data inside the apps submenu, so it always needs apps enabled for the plugin to create
+    // its chart; without this comparison eBPF.plugin would try to store invalid data when apps is disabled.
+ if (!disable_apps || !strcmp(em->thread_name, "oomkill")) {
+ em->apps_charts = CONFIG_BOOLEAN_YES;
}
- em->global_charts = 1;
+
+ if (!disable_cgroup) {
+ em->cgroup_charts = CONFIG_BOOLEAN_YES;
+ }
+
+ em->global_charts = CONFIG_BOOLEAN_YES;
}
/**
* Enable all charts
*
- * @param apps what is the current status of apps
+ * @param apps what is the current status of apps
+ * @param cgroups what is the current status of cgroups
*/
-static inline void ebpf_enable_all_charts(int apps)
+static inline void ebpf_enable_all_charts(int apps, int cgroups)
{
int i;
for (i = 0; ebpf_modules[i].thread_name; i++) {
- ebpf_enable_specific_chart(&ebpf_modules[i], apps);
+ ebpf_enable_specific_chart(&ebpf_modules[i], apps, cgroups);
}
}
/**
+ * Disable all Global charts
+ *
+ * Clear the enabled flag and global charts for every module.
+ */
+static inline void disable_all_global_charts()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].enabled = 0;
+ ebpf_modules[i].global_charts = 0;
+ }
+}
+
+
+/**
* Enable the specified chart group
*
* @param idx the index of ebpf_modules that I am enabling
* @param disable_apps should I keep apps charts?
*/
-static inline void ebpf_enable_chart(int idx, int disable_apps)
+static inline void ebpf_enable_chart(int idx, int disable_apps, int disable_cgroup)
{
int i;
for (i = 0; ebpf_modules[i].thread_name; i++) {
if (i == idx) {
- ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps);
+ ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps, disable_cgroup);
break;
}
}
@@ -559,6 +751,19 @@ static inline void ebpf_disable_apps()
}
/**
+ * Disable Cgroups
+ *
+ * Disable cgroup charts for all modules, loading only global charts.
+ */
+static inline void ebpf_disable_cgroups()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].cgroup_charts = 0;
+ }
+}
+
+/**
* Print help on standard error for user knows how to use the collector.
*/
void ebpf_print_help()
@@ -579,39 +784,140 @@ void ebpf_print_help()
" Released under GNU General Public License v3 or later.\n"
" All rights reserved.\n"
"\n"
- " This program is a data collector plugin for netdata.\n"
+ " This eBPF.plugin is a data collector plugin for netdata.\n"
"\n"
- " Available command line options:\n"
+ " This plugin only accepts long options with one or two dashes. The available command line options are:\n"
"\n"
- " SECONDS Set the data collection frequency.\n"
+ " SECONDS Set the data collection frequency.\n"
"\n"
- " --help or -h Show this help.\n"
+ " [-]-help Show this help.\n"
"\n"
- " --version or -v Show software version.\n"
+ " [-]-version Show software version.\n"
"\n"
- " --global or -g Disable charts per application.\n"
+ " [-]-global Disable charts per application and cgroup.\n"
"\n"
- " --all or -a Enable all chart groups (global and apps), unless -g is also given.\n"
+ " [-]-all Enable all chart groups (global, apps, and cgroup), unless -g is also given.\n"
"\n"
- " --cachestat or -c Enable charts related to process run time.\n"
+ " [-]-cachestat Enable charts related to process run time.\n"
"\n"
- " --dcstat or -d Enable charts related to directory cache.\n"
+ " [-]-dcstat Enable charts related to directory cache.\n"
"\n"
- " --net or -n Enable network viewer charts.\n"
+ " [-]-disk Enable charts related to disk monitoring.\n"
"\n"
- " --process or -p Enable charts related to process run time.\n"
+ " [-]-filesystem Enable chart related to filesystem run time.\n"
"\n"
- " --return or -r Run the collector in return mode.\n"
- "\n",
- " --sync or -s Enable chart related to sync run time.\n"
+ " [-]-hardirq Enable chart related to hard IRQ latency.\n"
+ "\n"
+ " [-]-mdflush Enable charts related to multi-device flush.\n"
+ "\n"
+ " [-]-mount Enable charts related to mount monitoring.\n"
"\n"
+ " [-]-net Enable network viewer charts.\n"
+ "\n"
+ " [-]-oomkill Enable chart related to OOM kill tracking.\n"
+ "\n"
+ " [-]-process Enable charts related to process run time.\n"
+ "\n"
+ " [-]-return Run the collector in return mode.\n"
+ "\n"
+ " [-]-shm Enable chart related to shared memory tracking.\n"
+ "\n"
+ " [-]-softirq Enable chart related to soft IRQ latency.\n"
+ "\n"
+ " [-]-sync Enable chart related to sync run time.\n"
+ "\n"
+ " [-]-swap Enable chart related to swap run time.\n"
+ "\n"
+ " [-]-vfs Enable chart related to vfs run time.\n"
+ "\n",
VERSION,
(year >= 116) ? year + 1900 : 2020);
}
/*****************************************************************
*
- * AUXILIAR FUNCTIONS USED DURING INITIALIZATION
+ * TRACEPOINT MANAGEMENT FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Enable a tracepoint.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp)
+{
+ int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
+
+ // err?
+ if (test == -1) {
+ return -1;
+ }
+ // disabled?
+ else if (test == 0) {
+ // enable it then.
+ if (ebpf_enable_tracing_values(tp->class, tp->event)) {
+ return -1;
+ }
+ }
+
+ // enabled now or already was.
+ tp->enabled = true;
+
+ return 0;
+}
+
+/**
+ * Disable a tracepoint if it's enabled.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp)
+{
+ int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
+
+ // err?
+ if (test == -1) {
+ return -1;
+ }
+ // enabled?
+ else if (test == 1) {
+ // disable it then.
+ if (ebpf_disable_tracing_values(tp->class, tp->event)) {
+ return -1;
+ }
+ }
+
+    // disabled now or already was.
+ tp->enabled = false;
+
+ return 0;
+}
+
+/**
+ * Enable multiple tracepoints from a list of tracepoints that ends when the
+ * class is NULL.
+ *
+ * @return the number of successful enables.
+ */
+uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps)
+{
+ uint32_t cnt = 0;
+ for (int i = 0; tps[i].class != NULL; i++) {
+ if (ebpf_enable_tracepoint(&tps[i]) == -1) {
+ infoerr("failed to enable tracepoint %s:%s",
+ tps[i].class, tps[i].event);
+ }
+ else {
+ cnt += 1;
+ }
+ }
+ return cnt;
+}
+
+/*****************************************************************
+ *
+ * AUXILIARY FUNCTIONS USED DURING INITIALIZATION
*
*****************************************************************/
@@ -746,20 +1052,6 @@ static void ebpf_allocate_common_vectors()
}
/**
- * Fill the ebpf_data structure with default values
- *
- * @param ef the pointer to set default values
- */
-void fill_ebpf_data(ebpf_data_t *ef)
-{
- memset(ef, 0, sizeof(ebpf_data_t));
- ef->kernel_string = kernel_string;
- ef->running_on_kernel = running_on_kernel;
- ef->map_fd = callocz(EBPF_MAX_MAPS, sizeof(int));
- ef->isrh = isrh;
-}
-
-/**
* Define how to load the ebpf programs
*
* @param ptr the option given by users
@@ -778,13 +1070,16 @@ static inline void how_to_load(char *ptr)
* Update interval
*
* Update default interval with value from user
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void ebpf_update_interval()
+static void ebpf_update_interval(int update_every)
{
int i;
- int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, 1);
+ int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY,
+ update_every);
for (i = 0; ebpf_modules[i].thread_name; i++) {
- ebpf_modules[i].update_time = value;
+ ebpf_modules[i].update_every = value;
}
}
@@ -807,9 +1102,11 @@ static void ebpf_update_table_size()
/**
* Read collector values
*
- * @param disable_apps variable to store information related to apps.
+ * @param disable_apps variable to store information related to apps.
+ * @param disable_cgroups variable to store information related to cgroups.
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void read_collector_values(int *disable_apps)
+static void read_collector_values(int *disable_apps, int *disable_cgroups, int update_every)
{
// Read global section
char *value;
@@ -822,7 +1119,7 @@ static void read_collector_values(int *disable_apps)
how_to_load(value);
- ebpf_update_interval();
+ ebpf_update_interval(update_every);
ebpf_update_table_size();
@@ -837,12 +1134,17 @@ static void read_collector_values(int *disable_apps)
}
*disable_apps = (int)enabled;
+    // "cgroups" is a positive option, so we invert its value to obtain disable_cgroups.
+    // We are using the same pattern for cgroup and apps.
+ enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO);
+ *disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
+
// Read ebpf programs section
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
ebpf_modules[EBPF_MODULE_PROCESS_IDX].config_name, CONFIG_BOOLEAN_YES);
int started = 0;
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps, *disable_cgroups);
started++;
}
@@ -855,7 +1157,7 @@ static void read_collector_values(int *disable_apps)
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps, *disable_cgroups);
// Read network viewer section if network viewer is enabled
// This is kept here to keep backward compatibility
parse_network_viewer_section(&collector_config);
@@ -869,13 +1171,13 @@ static void read_collector_values(int *disable_apps)
if (!enabled)
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
CONFIG_BOOLEAN_NO);
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = enabled;
+ ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = (int)enabled;
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat",
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps, *disable_cgroups);
started++;
}
@@ -883,19 +1185,96 @@ static void read_collector_values(int *disable_apps)
CONFIG_BOOLEAN_YES);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps, *disable_cgroups);
started++;
}
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "dcstat",
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "swap",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "disk",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_DISK_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mount",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_MOUNT_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "hardirq",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_HARDIRQ_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "softirq",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "oomkill",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_OOMKILL_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mdflush",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_MDFLUSH_IDX, *disable_apps, *disable_cgroups);
started++;
}
if (!started){
- ebpf_enable_all_charts(*disable_apps);
+ ebpf_enable_all_charts(*disable_apps, *disable_cgroups);
// Read network viewer section
// This is kept here to keep backward compatibility
parse_network_viewer_section(&collector_config);
@@ -906,12 +1285,14 @@ static void read_collector_values(int *disable_apps)
/**
* Load collector config
*
- * @param path the path where the file ebpf.conf is stored.
- * @param disable_apps variable to store the information about apps plugin status.
+ * @param path the path where the file ebpf.conf is stored.
+ * @param disable_apps variable to store the information about apps plugin status.
+ * @param disable_cgroups variable to store the information about cgroups plugin status.
+ * @param update_every value to overwrite the update frequency set by the server.
*
* @return 0 on success and -1 otherwise.
*/
-static int load_collector_config(char *path, int *disable_apps)
+static int load_collector_config(char *path, int *disable_apps, int *disable_cgroups, int update_every)
{
char lpath[4096];
@@ -923,7 +1304,7 @@ static int load_collector_config(char *path, int *disable_apps)
}
}
- read_collector_values(disable_apps);
+ read_collector_values(disable_apps, disable_cgroups, update_every);
return 0;
}
@@ -957,6 +1338,21 @@ void set_global_variables()
isrh = get_redhat_release();
pid_max = get_system_pid_max();
+ running_on_kernel = ebpf_get_kernel_version();
+ ebpf_update_kernel(kernel_string, 63, isrh, running_on_kernel);
+}
+
+/**
+ * Load collector config
+ *
+ * @param lmode the mode that will be used for them.
+ */
+static inline void ebpf_load_thread_config()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_update_module(&ebpf_modules[i]);
+ }
}
/**
@@ -965,23 +1361,36 @@ void set_global_variables()
* @param argc the number of arguments
* @param argv the pointer to the arguments
*/
-static void parse_args(int argc, char **argv)
+static void ebpf_parse_args(int argc, char **argv)
{
- int enabled = 0;
int disable_apps = 0;
+ int disable_cgroups = 1;
int freq = 0;
int option_index = 0;
+ uint64_t select_threads = 0;
static struct option long_options[] = {
- {"help", no_argument, 0, 'h' },
- {"version", no_argument, 0, 'v' },
- {"global", no_argument, 0, 'g' },
- {"all", no_argument, 0, 'a' },
- {"cachestat", no_argument, 0, 'c' },
- {"dcstat", no_argument, 0, 'd' },
- {"net", no_argument, 0, 'n' },
- {"process", no_argument, 0, 'p' },
- {"return", no_argument, 0, 'r' },
- {"sync", no_argument, 0, 's' },
+ {"process", no_argument, 0, 0 },
+ {"net", no_argument, 0, 0 },
+ {"cachestat", no_argument, 0, 0 },
+ {"sync", no_argument, 0, 0 },
+ {"dcstat", no_argument, 0, 0 },
+ {"swap", no_argument, 0, 0 },
+ {"vfs", no_argument, 0, 0 },
+ {"filesystem", no_argument, 0, 0 },
+ {"disk", no_argument, 0, 0 },
+ {"mount", no_argument, 0, 0 },
+ {"filedescriptor", no_argument, 0, 0 },
+ {"hardirq", no_argument, 0, 0 },
+ {"softirq", no_argument, 0, 0 },
+ {"oomkill", no_argument, 0, 0 },
+ {"shm", no_argument, 0, 0 },
+ {"mdflush", no_argument, 0, 0 },
+ /* INSERT NEW THREADS BEFORE THIS COMMENT TO KEEP COMPATIBILITY WITH enum ebpf_module_indexes */
+ {"all", no_argument, 0, 0 },
+ {"version", no_argument, 0, 0 },
+ {"help", no_argument, 0, 0 },
+ {"global", no_argument, 0, 0 },
+ {"return", no_argument, 0, 0 },
{0, 0, 0, 0}
};
@@ -995,83 +1404,166 @@ static void parse_args(int argc, char **argv)
}
}
+ if (!freq)
+ freq = EBPF_DEFAULT_UPDATE_EVERY;
+
+ if (load_collector_config(ebpf_user_config_dir, &disable_apps, &disable_cgroups, freq)) {
+ info(
+ "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.",
+ ebpf_user_config_dir);
+ if (load_collector_config(ebpf_stock_config_dir, &disable_apps, &disable_cgroups, freq)) {
+ info("Does not have a stock file. It is starting with default options.");
+ }
+ }
+
+ ebpf_load_thread_config();
+
while (1) {
- int c = getopt_long(argc, argv, "hvgacdnprs", long_options, &option_index);
+ int c = getopt_long_only(argc, argv, "", long_options, &option_index);
if (c == -1)
break;
- switch (c) {
- case 'h': {
- ebpf_print_help();
- exit(0);
+ switch (option_index) {
+ case EBPF_MODULE_PROCESS_IDX: {
+ select_threads |= 1<<EBPF_MODULE_PROCESS_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"PROCESS\" charts, because it was started with the option \"[-]-process\".");
+#endif
+ break;
}
- case 'v': {
- printf("ebpf.plugin %s\n", VERSION);
- exit(0);
+ case EBPF_MODULE_SOCKET_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SOCKET_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"NET\" charts, because it was started with the option \"[-]-net\".");
+#endif
+ break;
}
- case 'g': {
- disable_apps = 1;
- ebpf_disable_apps();
+ case EBPF_MODULE_CACHESTAT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_CACHESTAT_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF running with global chart group, because it was started with the option \"--global\" or \"-g\".");
+ info("EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"[-]-cachestat\".");
#endif
break;
}
- case 'a': {
- ebpf_enable_all_charts(disable_apps);
+ case EBPF_MODULE_SYNC_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SYNC_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running with all chart groups, because it was started with the option \"--all\" or \"-a\".");
+ info("EBPF enabling \"SYNC\" chart, because it was started with the option \"[-]-sync\".");
#endif
break;
}
- case 'c': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, disable_apps);
+ case EBPF_MODULE_DCSTAT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_DCSTAT_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"--cachestat\" or \"-c\".");
+ info("EBPF enabling \"DCSTAT\" charts, because it was started with the option \"[-]-dcstat\".");
#endif
break;
}
- case 'd': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, disable_apps);
+ case EBPF_MODULE_SWAP_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SWAP_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"DCSTAT\" charts, because it was started with the option \"--dcstat\" or \"-d\".");
+ info("EBPF enabling \"SWAP\" chart, because it was started with the option \"[-]-swap\".");
#endif
break;
}
- case 'n': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
+ case EBPF_MODULE_VFS_IDX: {
+ select_threads |= 1<<EBPF_MODULE_VFS_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF enabling \"NET\" charts, because it was started with the option \"--net\" or \"-n\".");
+ info("EBPF enabling \"VFS\" chart, because it was started with the option \"[-]-vfs\".");
#endif
break;
}
- case 'p': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, disable_apps);
+ case EBPF_MODULE_FILESYSTEM_IDX: {
+ select_threads |= 1<<EBPF_MODULE_FILESYSTEM_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"PROCESS\" charts, because it was started with the option \"--process\" or \"-p\".");
+ info("EBPF enabling \"FILESYSTEM\" chart, because it was started with the option \"[-]-filesystem\".");
#endif
break;
}
- case 'r': {
- ebpf_set_thread_mode(MODE_RETURN);
+ case EBPF_MODULE_DISK_IDX: {
+ select_threads |= 1<<EBPF_MODULE_DISK_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"DISK\" chart, because it was started with the option \"[-]-disk\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_MOUNT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_MOUNT_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"MOUNT\" chart, because it was started with the option \"[-]-mount\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_FD_IDX: {
+ select_threads |= 1<<EBPF_MODULE_FD_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"FILEDESCRIPTOR\" chart, because it was started with the option \"[-]-filedescriptor\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_HARDIRQ_IDX: {
+ select_threads |= 1<<EBPF_MODULE_HARDIRQ_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"HARDIRQ\" chart, because it was started with the option \"[-]-hardirq\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_SOFTIRQ_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SOFTIRQ_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running in \"return\" mode, because it was started with the option \"--return\" or \"-r\".");
+ info("EBPF enabling \"SOFTIRQ\" chart, because it was started with the option \"[-]-softirq\".");
#endif
break;
}
- case 's': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, disable_apps);
+ case EBPF_MODULE_OOMKILL_IDX: {
+ select_threads |= 1<<EBPF_MODULE_OOMKILL_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF enabling \"sync\" chart, because it was started with the option \"--sync\" or \"-s\".");
+ info("EBPF enabling \"OOMKILL\" chart, because it was started with the option \"[-]-oomkill\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_SHM_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SHM_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"SHM\" chart, because it was started with the option \"[-]-shm\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_MDFLUSH_IDX: {
+ select_threads |= 1<<EBPF_MODULE_MDFLUSH_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"MDFLUSH\" chart, because it was started with the option \"[-]-mdflush\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_ALL_CHARTS: {
+ disable_apps = 0;
+ disable_cgroups = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all chart groups, because it was started with the option \"[-]-all\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_VERSION: {
+ printf("ebpf.plugin %s\n", VERSION);
+ exit(0);
+ }
+ case EBPF_OPTION_HELP: {
+ ebpf_print_help();
+ exit(0);
+ }
+ case EBPF_OPTION_GLOBAL_CHART: {
+ disable_apps = 1;
+ disable_cgroups = 1;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with global chart group, because it was started with the option \"[-]-global\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_RETURN_MODE: {
+ ebpf_set_thread_mode(MODE_RETURN);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running in \"RETURN\" mode, because it was started with the option \"[-]-return\".");
#endif
break;
}
@@ -1081,44 +1573,34 @@ static void parse_args(int argc, char **argv)
}
}
- if (freq > 0) {
- update_every = freq;
- }
+ if (disable_apps || disable_cgroups) {
+ if (disable_apps)
+ ebpf_disable_apps();
- if (load_collector_config(ebpf_user_config_dir, &disable_apps)) {
- info(
- "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.",
- ebpf_user_config_dir);
- if (load_collector_config(ebpf_stock_config_dir, &disable_apps)) {
- info("Does not have a stock file. It is starting with default options.");
- } else {
- enabled = 1;
- }
- } else {
- enabled = 1;
- }
+ if (disable_cgroups)
+ ebpf_disable_cgroups();
- if (!enabled) {
- ebpf_enable_all_charts(disable_apps);
-#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running with all charts, because neither \"-n\" or \"-p\" was given.");
-#endif
+ ebpf_enable_all_charts(disable_apps, disable_cgroups);
}
- if (disable_apps)
- return;
+ if (select_threads) {
+ disable_all_global_charts();
+ uint64_t idx;
+ for (idx = 0; idx < EBPF_OPTION_ALL_CHARTS; idx++) {
+ if (select_threads & 1<<idx)
+ ebpf_enable_specific_chart(&ebpf_modules[idx], disable_apps, disable_cgroups);
+ }
+ }
// Load apps_groups.conf
if (ebpf_read_apps_groups_conf(
&apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
- info(
- "Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
- ebpf_user_config_dir, ebpf_stock_config_dir);
+ info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
+ ebpf_user_config_dir, ebpf_stock_config_dir);
if (ebpf_read_apps_groups_conf(
&apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
- error(
- "Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
- ebpf_stock_config_dir);
+ error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
+ ebpf_stock_config_dir);
thread_finished++;
ebpf_exit(1);
}
@@ -1133,6 +1615,136 @@ static void parse_args(int argc, char **argv)
*****************************************************************/
/**
 * Update PID file
 *
 * Overwrite the PID file with the PID of the current process.
 * Failure to open the file is silently ignored (best effort).
 *
 * @param filename is the full name of the file.
 * @param pid that identifies the process
 */
static void ebpf_update_pid_file(char *filename, pid_t pid)
{
    FILE *out = fopen(filename, "w");
    if (out) {
        fprintf(out, "%d", pid);
        fclose(out);
    }
}
+
+/**
+ * Get Process Name
+ *
+ * Get process name from /proc/PID/status
+ *
+ * @param pid that identifies the process
+ */
+static char *ebpf_get_process_name(pid_t pid)
+{
+ char *name = NULL;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "/proc/%d/status", pid);
+
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open %s", filename);
+ return name;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return name;
+
+ unsigned long i, lines = procfile_lines(ff);
+ for(i = 0; i < lines ; i++) {
+ char *cmp = procfile_lineword(ff, i, 0);
+ if (!strcmp(cmp, "Name:")) {
+ name = strdupz(procfile_lineword(ff, i, 1));
+ break;
+ }
+ }
+
+ procfile_close(ff);
+
+ return name;
+}
+
/**
 * Read Previous PID
 *
 * Read the PID stored by a previous plugin instance from its PID file.
 *
 * @param filename is the full name of the file.
 *
 * @return It returns the PID used during previous execution on success or 0 otherwise
 */
static pid_t ebpf_read_previous_pid(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if (!fp)
        return 0;

    char buffer[64];
    // fread() of at most 63 items can never return more than 63, so the
    // original `if (length > 63)` clamp was dead code; the terminator
    // below always fits inside the buffer.
    size_t length = fread(buffer, sizeof(*buffer), sizeof(buffer) - 1, fp);
    fclose(fp);

    pid_t old_pid = 0;
    if (length) {
        buffer[length] = '\0';
        old_pid = (pid_t)str2uint32_t(buffer);
    }

    return old_pid;
}
+
+/**
+ * Kill previous process
+ *
+ * Kill previous process whether it was not closed.
+ *
+ * @param filename is the full name of the file.
+ * @param pid that identifies the process
+ */
+static void ebpf_kill_previous_process(char *filename, pid_t pid)
+{
+ pid_t old_pid = ebpf_read_previous_pid(filename);
+ if (!old_pid)
+ return;
+
+ // Process is not running
+ char *prev_name = ebpf_get_process_name(old_pid);
+ if (!prev_name)
+ return;
+
+ char *current_name = ebpf_get_process_name(pid);
+
+ if (!strcmp(prev_name, current_name))
+ kill(old_pid, SIGKILL);
+
+ freez(prev_name);
+ freez(current_name);
+
+ // wait few microseconds before start new plugin
+ sleep_usec(USEC_PER_MS * 300);
+}
+
+/**
+ * Manage PID
+ *
+ * This function kills another instance of eBPF whether it is necessary and update the file content.
+ *
+ * @param pid that identifies the process
+ */
+static void ebpf_manage_pid(pid_t pid)
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s/ebpf.d/ebpf.pid", netdata_configured_host_prefix, ebpf_plugin_dir);
+
+ ebpf_kill_previous_process(filename, pid);
+ ebpf_update_pid_file(filename, pid);
+}
+
+/**
* Entry point
*
* @param argc the number of arguments
@@ -1143,9 +1755,9 @@ static void parse_args(int argc, char **argv)
int main(int argc, char **argv)
{
set_global_variables();
- parse_args(argc, argv);
+ ebpf_parse_args(argc, argv);
+ ebpf_manage_pid(getpid());
- running_on_kernel = get_kernel_version(kernel_string, 63);
if (!has_condition_to_run(running_on_kernel)) {
error("The current collector cannot run on this kernel.");
return 2;
@@ -1203,6 +1815,28 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_SYNC_IDX].start_routine},
{"EBPF DCSTAT" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine},
+ {"EBPF SWAP" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine},
+ {"EBPF VFS" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine},
+ {"EBPF FILESYSTEM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].start_routine},
+ {"EBPF DISK" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_DISK_IDX].start_routine},
+ {"EBPF MOUNT" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_MOUNT_IDX].start_routine},
+ {"EBPF FD" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FD_IDX].start_routine},
+ {"EBPF HARDIRQ" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_HARDIRQ_IDX].start_routine},
+ {"EBPF SOFTIRQ" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SOFTIRQ_IDX].start_routine},
+ {"EBPF OOMKILL" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_OOMKILL_IDX].start_routine},
+ {"EBPF SHM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SHM_IDX].start_routine},
+ {"EBPF MDFLUSH" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_MDFLUSH_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};