diff options
Diffstat (limited to 'collectors/ebpf.plugin')
43 files changed, 2118 insertions, 1009 deletions
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md index 550982ad2..7762ed34f 100644 --- a/collectors/ebpf.plugin/README.md +++ b/collectors/ebpf.plugin/README.md @@ -137,6 +137,25 @@ If you do not need to monitor specific metrics for your `cgroups`, you can enabl `ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the [Configuration](#configuring-ebpfplugin) section. +#### Collect PID + +When one of the previous integrations is enabled, `ebpf.plugin` will use Process Identifier (`PID`) to identify the +process group for which it needs to plot data. + +There are different ways to collect PID, and you can select the way `ebpf.plugin` collects data with the following +values: + +- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates + child threads. +- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time. +- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host, + because more memory needs to be allocated and parsed. + +The threads that have integration with other collectors have an internal clean up wherein they attach either a +`trampoline` or a `kprobe` to `release_task` internal function. To avoid `overload` on this function, `ebpf.plugin` +will only enable these threads integrated with other collectors when the kernel is compiled with +`CONFIG_DEBUG_INFO_BTF`, unless you enable them manually. 
+ #### Integration Dashboard Elements When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index 65c96f672..00b53a57d 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -27,177 +27,317 @@ struct config collector_config = { .first_section = NULL, int running_on_kernel = 0; int ebpf_nprocs; int isrh = 0; +int main_thread_id = 0; pthread_mutex_t lock; +pthread_mutex_t ebpf_exit_cleanup; pthread_mutex_t collect_data_mutex; pthread_cond_t collect_data_cond_var; ebpf_module_t ebpf_modules[] = { { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_process_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config, .config_file = NETDATA_PROCESS_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 | + NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, 
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config, .config_file = NETDATA_NETWORK_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_cachestat_create_apps_charts, .maps = cachestat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config, .config_file = NETDATA_CACHESTAT_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18| NETDATA_V5_4 | NETDATA_V5_15 | - NETDATA_V5_16, - .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18| + NETDATA_V5_4 | NETDATA_V5_14 | NETDATA_V5_15 | NETDATA_V5_16, + .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = 
NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, .config_file = NETDATA_SYNC_CONFIG_FILE, // All syscalls have the same kernels - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, - .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_dcstat_create_apps_charts, .maps = dcstat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config, .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "swap", 
.config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config, .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config, .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, 
.start_routine = ebpf_filesystem_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, .config_file = NETDATA_FILESYSTEM_CONFIG_FILE, //We are setting kernels as zero, because we load eBPF programs according the kernel running. - .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL }, + .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL }, { .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, .config_file = NETDATA_DISK_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "mount", .config_name = "mount", 
.enabled = 0, .start_routine = ebpf_mount_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, .config_file = NETDATA_MOUNT_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config, .config_file = NETDATA_FD_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11 | + NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = fd_targets, .probe_links = NULL, .objects = NULL, .thread = NULL}, { 
.thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, .config_file = NETDATA_HARDIRQ_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, .config_file = NETDATA_SOFTIRQ_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | 
NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config, .config_file = NETDATA_OOMKILL_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config, .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = shm_targets, 
.probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread, .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, - .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, + .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY, - .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, - .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL, - .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, - .objects = NULL}, + .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = 0, .names = NULL, .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, + .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL}, }; struct netdata_static_thread 
ebpf_threads[] = { - {"EBPF PROCESS", NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF SOCKET" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF CACHESTAT" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF SYNC" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF DCSTAT" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF SWAP" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF VFS" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF FILESYSTEM" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF DISK" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF MOUNT" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF FD" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF HARDIRQ" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF SOFTIRQ" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF OOMKILL" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF SHM" , NULL, NULL, 1, - NULL, NULL, NULL}, - {"EBPF MDFLUSH" , NULL, NULL, 1, - NULL, NULL, NULL}, - {NULL , NULL, NULL, 0, - NULL, NULL, NULL} + { + .name = "EBPF PROCESS", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF SOCKET", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF CACHESTAT", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF SYNC", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF DCSTAT", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF SWAP", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + 
.init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF VFS", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF FILESYSTEM", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF DISK", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF MOUNT", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF FD", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF HARDIRQ", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF SOFTIRQ", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF OOMKILL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF SHM", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = "EBPF MDFLUSH", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, + { + .name = NULL, + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 
0, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL + }, }; ebpf_filesystem_partitions_t localfs[] = @@ -294,6 +434,7 @@ ebpf_sync_syscalls_t local_syscalls[] = { } }; + // Link with apps.plugin ebpf_process_stat_t *global_process_stat = NULL; @@ -312,6 +453,8 @@ ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, . #ifdef LIBBPF_MAJOR_VERSION struct btf *default_btf = NULL; +#else +void *default_btf = NULL; #endif char *btf_path = NULL; @@ -323,16 +466,16 @@ char *btf_path = NULL; /** * Close the collector gracefully - * - * @param sig is the signal number used to close the collector */ -static void ebpf_exit(int sig) +static void ebpf_exit() { #ifdef LIBBPF_MAJOR_VERSION + pthread_mutex_lock(&ebpf_exit_cleanup); if (default_btf) { btf__free(default_btf); default_btf = NULL; } + pthread_mutex_unlock(&ebpf_exit_cleanup); #endif char filename[FILENAME_MAX + 1]; @@ -340,7 +483,7 @@ static void ebpf_exit(int sig) if (unlink(filename)) error("Cannot remove PID file %s", filename); - exit(sig); + exit(0); } /** @@ -373,44 +516,66 @@ int ebpf_exit_plugin = 0; */ static void ebpf_stop_threads(int sig) { - ebpf_exit_plugin = 1; + UNUSED(sig); + static int only_one = 0; + int i; - for (i = 0; ebpf_threads[i].name != NULL; i++); + // Child thread should be closed by itself. 
+ pthread_mutex_lock(&ebpf_exit_cleanup); + if (main_thread_id != gettid() || only_one) { + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + only_one = 1; + for (i = 0; ebpf_threads[i].name != NULL; i++) { + if (ebpf_threads[i].enabled != NETDATA_THREAD_EBPF_STOPPED) + netdata_thread_cancel(*ebpf_threads[i].thread); + } + pthread_mutex_unlock(&ebpf_exit_cleanup); - usec_t max = 2 * USEC_PER_SEC, step = 100000; + ebpf_exit_plugin = 1; + usec_t max = 3 * USEC_PER_SEC, step = 100000; while (i && max) { max -= step; sleep_usec(step); i = 0; int j; + pthread_mutex_lock(&ebpf_exit_cleanup); for (j = 0; ebpf_threads[j].name != NULL; j++) { - if (ebpf_threads[j].enabled != NETDATA_MAIN_THREAD_EXITED) + if (ebpf_threads[j].enabled != NETDATA_THREAD_EBPF_STOPPED) i++; } + pthread_mutex_unlock(&ebpf_exit_cleanup); } //Unload threads(except sync and filesystem) + pthread_mutex_lock(&ebpf_exit_cleanup); for (i = 0; ebpf_threads[i].name != NULL; i++) { - if (ebpf_threads[i].enabled == NETDATA_MAIN_THREAD_EXITED && i != EBPF_MODULE_FILESYSTEM_IDX && + if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX && i != EBPF_MODULE_SYNC_IDX) ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links); } + pthread_mutex_unlock(&ebpf_exit_cleanup); //Unload filesystem - if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_MAIN_THREAD_EXITED) { + pthread_mutex_lock(&ebpf_exit_cleanup); + if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) { for (i = 0; localfs[i].filesystem != NULL; i++) { ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links); } } + pthread_mutex_unlock(&ebpf_exit_cleanup); //Unload Sync - if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_MAIN_THREAD_EXITED) { + pthread_mutex_lock(&ebpf_exit_cleanup); + if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) { for (i = 0; local_syscalls[i].syscall != NULL; i++) { 
ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links); } } + pthread_mutex_unlock(&ebpf_exit_cleanup); - ebpf_exit(sig); + ebpf_exit(); } /***************************************************************** @@ -1170,6 +1335,7 @@ static void read_local_addresses() int ebpf_start_pthread_variables() { pthread_mutex_init(&lock, NULL); + pthread_mutex_init(&ebpf_exit_cleanup, NULL); pthread_mutex_init(&collect_data_mutex, NULL); if (pthread_cond_init(&collect_data_cond_var, NULL)) { @@ -1261,35 +1427,28 @@ static void ebpf_update_table_size() /** * Set Load mode * - * @param load default load mode. + * @param origin specify the configuration file loaded */ -static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load) +static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebpf_load_mode_t origin) { -#ifdef LIBBPF_MAJOR_VERSION - if (load == EBPF_LOAD_CORE || load == EBPF_LOAD_PLAY_DICE) { - load = (!default_btf) ? EBPF_LOAD_LEGACY : EBPF_LOAD_CORE; - } -#else - load = EBPF_LOAD_LEGACY; -#endif - int i; for (i = 0; ebpf_modules[i].thread_name; i++) { - // TO DO: Use `load` variable after we change all threads. - ebpf_modules[i].load = EBPF_LOAD_LEGACY; // load ; + ebpf_modules[i].load &= ~NETDATA_EBPF_LOAD_METHODS; + ebpf_modules[i].load |= load | origin ; } } /** * Update mode * - * @param str value read from configuration file. + * @param str value read from configuration file. + * @param origin specify the configuration file loaded */ -static inline void epbf_update_load_mode(char *str) +static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin) { netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str); - ebpf_set_load_mode(load); + ebpf_set_load_mode(load, origin); } /** @@ -1297,9 +1456,11 @@ static inline void epbf_update_load_mode(char *str) * * @param disable_apps variable to store information related to apps. 
* @param disable_cgroups variable to store information related to cgroups. - * @param update_every value to overwrite the update frequency set by the server. + * @param update_every value to overwrite the update frequency set by the server. + * @param origin specify the configuration file loaded */ -static void read_collector_values(int *disable_apps, int *disable_cgroups, int update_every) +static void read_collector_values(int *disable_apps, int *disable_cgroups, + int update_every, netdata_ebpf_load_mode_t origin) { // Read global section char *value; @@ -1321,7 +1482,7 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups, int u value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, EBPF_CFG_DEFAULT_PROGRAM); - epbf_update_load_mode(value); + epbf_update_load_mode(value, origin); ebpf_update_interval(update_every); @@ -1499,6 +1660,7 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups, int u static int load_collector_config(char *path, int *disable_apps, int *disable_cgroups, int update_every) { char lpath[4096]; + netdata_ebpf_load_mode_t origin; snprintf(lpath, 4095, "%s/%s", path, NETDATA_EBPF_CONFIG_FILE); if (!appconfig_load(&collector_config, lpath, 0, NULL)) { @@ -1506,9 +1668,11 @@ static int load_collector_config(char *path, int *disable_apps, int *disable_cgr if (!appconfig_load(&collector_config, lpath, 0, NULL)) { return -1; } - } + origin = EBPF_LOADED_FROM_STOCK; + } else + origin = EBPF_LOADED_FROM_USER; - read_collector_values(disable_apps, disable_cgroups, update_every); + read_collector_values(disable_apps, disable_cgroups, update_every, origin); return 0; } @@ -1552,7 +1716,7 @@ static inline void ebpf_load_thread_config() { int i; for (i = 0; ebpf_modules[i].thread_name; i++) { - ebpf_update_module(&ebpf_modules[i]); + ebpf_update_module(&ebpf_modules[i], default_btf, running_on_kernel, isrh); } } @@ -1771,14 +1935,14 @@ static void ebpf_parse_args(int argc, char 
**argv) break; } case EBPF_OPTION_LEGACY: { - ebpf_set_load_mode(EBPF_LOAD_LEGACY); + ebpf_set_load_mode(EBPF_LOAD_LEGACY, EBPF_LOADED_FROM_USER); #ifdef NETDATA_INTERNAL_CHECKS info("EBPF running with \"LEGACY\" code, because it was started with the option \"[-]-legacy\"."); #endif break; } case EBPF_OPTION_CORE: { - ebpf_set_load_mode(EBPF_LOAD_CORE); + ebpf_set_load_mode(EBPF_LOAD_CORE, EBPF_LOADED_FROM_USER); #ifdef NETDATA_INTERNAL_CHECKS info("EBPF running with \"CO-RE\" code, because it was started with the option \"[-]-core\"."); #endif @@ -1816,7 +1980,7 @@ static void ebpf_parse_args(int argc, char **argv) &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) { error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", ebpf_stock_config_dir); - ebpf_exit(1); + ebpf_exit(); } } else info("Loaded config file '%s/apps_groups.conf'", ebpf_user_config_dir); @@ -1995,6 +2159,7 @@ static void ebpf_manage_pid(pid_t pid) int main(int argc, char **argv) { clocks_init(); + main_thread_id = gettid(); set_global_variables(); ebpf_parse_args(argc, argv); @@ -2035,7 +2200,7 @@ int main(int argc, char **argv) if (ebpf_start_pthread_variables()) { error("Cannot start mutex to control overall charts."); - ebpf_exit(5); + ebpf_exit(); } netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); @@ -2058,18 +2223,25 @@ int main(int argc, char **argv) int i; for (i = 0; ebpf_threads[i].name != NULL; i++) { struct netdata_static_thread *st = &ebpf_threads[i]; - st->thread = mallocz(sizeof(netdata_thread_t)); ebpf_module_t *em = &ebpf_modules[i]; - em->thread_id = i; - netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em); + em->thread = st; + // We always initialize process, because it is responsible to take care of apps integration + if (em->enabled || !i) { + st->thread = mallocz(sizeof(netdata_thread_t)); + em->thread_id = i; + st->enabled = 
NETDATA_THREAD_EBPF_RUNNING; + netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em); + } else { + st->enabled = NETDATA_THREAD_EBPF_STOPPED; + } } - usec_t step = 60 * USEC_PER_SEC; + usec_t step = EBPF_DEFAULT_UPDATE_EVERY * USEC_PER_SEC; heartbeat_t hb; heartbeat_init(&hb); //Plugin will be killed when it receives a signal - for (;;) { + while (!ebpf_exit_plugin) { (void)heartbeat_next(&hb, step); } diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf index aeba473ed..cf5c740fc 100644 --- a/collectors/ebpf.plugin/ebpf.d.conf +++ b/collectors/ebpf.plugin/ebpf.d.conf @@ -26,16 +26,16 @@ # # eBPF Programs # -# The eBPF collector enables and runs the following eBPF programs by default: +# The eBPF collector has the following eBPF programs: # # `cachestat` : Make charts for kernel functions related to page cache. # `dcstat` : Make charts for kernel functions related to directory cache. # `disk` : Monitor I/O latencies for disks # `fd` : This eBPF program creates charts that show information about file manipulation. -# `mdflush` : Monitors flush counts for multi-devices. -# `mount` : Monitor calls for syscalls mount and umount # `filesystem`: Monitor calls for functions used to manipulate specific filesystems # `hardirq` : Monitor latency of serving hardware interrupt requests (hard IRQs). +# `mdflush` : Monitors flush counts for multi-devices. +# `mount` : Monitor calls for syscalls mount and umount # `oomkill` : This eBPF program creates a chart that shows which process got OOM killed and when. # `process` : This eBPF program creates charts that show information about process life. # `shm` : Monitor calls for syscalls shmget, shmat, shmdt and shmctl. @@ -46,6 +46,9 @@ # `swap` : Monitor calls for internal swap functions. # `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and # files removed. 
+# +# When plugin detects that system has support to BTF, it enables integration with apps.plugin. +# [ebpf programs] cachestat = no dcstat = no @@ -57,7 +60,7 @@ mount = yes oomkill = yes process = yes - shm = yes + shm = no socket = yes softirq = yes sync = yes diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf index e2418394e..52466be51 100644 --- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf +++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf @@ -19,6 +19,11 @@ # `trampoline`: This is the default mode used by the eBPF collector, due the small overhead added to host. # `probe` : This is the same as legacy code. # +# The `collect pid` option defines the PID stored inside hash tables and accepts the following options: +# `real parent`: Only stores real parent inside PID +# `parent` : Only stores parent PID. +# `all` : Stores all PIDs used by software. This is the most expensive option. +# # Uncomment lines to define specific options for thread. [global] # ebpf load mode = entry @@ -28,3 +33,4 @@ # pid table size = 32768 ebpf type format = auto ebpf co-re tracing = trampoline + collect pid = real parent diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf index 3986ae4f8..8aed8f783 100644 --- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf +++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf @@ -17,6 +17,11 @@ # `trampoline`: This is the default mode used by the eBPF collector, due the small overhead added to host. # `probe` : This is the same as legacy code. # +# The `collect pid` option defines the PID stored inside hash tables and accepts the following options: +# `real parent`: Only stores real parent inside PID +# `parent` : Only stores parent PID. +# `all` : Stores all PIDs used by software. This is the most expensive option. +# # Uncomment lines to define specific options for thread. 
[global] # ebpf load mode = entry @@ -26,3 +31,4 @@ # pid table size = 32768 ebpf type format = auto ebpf co-re tracing = trampoline + collect pid = real parent diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf index f6edd3d93..8333520fc 100644 --- a/collectors/ebpf.plugin/ebpf.d/fd.conf +++ b/collectors/ebpf.plugin/ebpf.d/fd.conf @@ -11,9 +11,11 @@ # The `pid table size` defines the maximum number of PIDs stored inside the hash table. # # Uncomment lines to define specific options for thread. -#[global] +[global] # ebpf load mode = entry # apps = yes # cgroups = no # update every = 10 # pid table size = 32768 + ebpf type format = auto + ebpf co-re tracing = trampoline diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/collectors/ebpf.plugin/ebpf.d/process.conf index f6edd3d93..1da5f84d3 100644 --- a/collectors/ebpf.plugin/ebpf.d/process.conf +++ b/collectors/ebpf.plugin/ebpf.d/process.conf @@ -9,6 +9,11 @@ # the setting `apps` and `cgroups` to 'no'. # # The `pid table size` defines the maximum number of PIDs stored inside the hash table. +# +# The `collect pid` option defines the PID stored inside hash tables and accepts the following options: +# `real parent`: Only stores real parent inside PID +# `parent` : Only stores parent PID. +# `all` : Stores all PIDs used by software. This is the most expensive option. # # Uncomment lines to define specific options for thread. #[global] @@ -17,3 +22,4 @@ # cgroups = no # update every = 10 # pid table size = 32768 +# collect pid = real parent diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf index a65e0acbc..fa5d5b4e9 100644 --- a/collectors/ebpf.plugin/ebpf.d/vfs.conf +++ b/collectors/ebpf.plugin/ebpf.d/vfs.conf @@ -9,9 +9,11 @@ # the setting `apps` and `cgroups` to 'no'. # # Uncomment lines to define specific options for thread. 
-#[global] +[global] # ebpf load mode = entry # apps = yes # cgroups = no # update every = 10 # pid table size = 32768 + ebpf type format = auto + ebpf co-re tracing = trampoline diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h index c23ca332d..28b04ce48 100644 --- a/collectors/ebpf.plugin/ebpf.h +++ b/collectors/ebpf.plugin/ebpf.h @@ -163,11 +163,12 @@ enum ebpf_algorithms_list { }; // Threads -extern void *ebpf_process_thread(void *ptr); -extern void *ebpf_socket_thread(void *ptr); +void *ebpf_process_thread(void *ptr); +void *ebpf_socket_thread(void *ptr); // Common variables extern pthread_mutex_t lock; +extern pthread_mutex_t ebpf_exit_cleanup; extern int ebpf_nprocs; extern int running_on_kernel; extern int isrh; @@ -177,14 +178,14 @@ extern pthread_mutex_t collect_data_mutex; extern pthread_cond_t collect_data_cond_var; // Common functions -extern void ebpf_global_labels(netdata_syscall_stat_t *is, +void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *pio, char **dim, char **name, int *algorithm, int end); -extern void ebpf_write_chart_cmd(char *type, +void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, @@ -195,11 +196,11 @@ extern void ebpf_write_chart_cmd(char *type, int update_every, char *module); -extern void ebpf_write_global_dimension(char *name, char *id, char *algorithm); +void ebpf_write_global_dimension(char *name, char *id, char *algorithm); -extern void ebpf_create_global_dimension(void *ptr, int end); +void ebpf_create_global_dimension(void *ptr, int end); -extern void ebpf_create_chart(char *type, +void ebpf_create_chart(char *type, char *id, char *title, char *units, @@ -213,18 +214,18 @@ extern void ebpf_create_chart(char *type, int update_every, char *module); -extern void write_begin_chart(char *family, char *name); +void write_begin_chart(char *family, char *name); -extern void write_chart_dimension(char *dim, long long value); +void write_chart_dimension(char *dim, 
long long value); -extern void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end); +void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end); -extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end); +void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end); -extern void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, +void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread); -extern void ebpf_create_charts_on_apps(char *name, +void ebpf_create_charts_on_apps(char *name, char *title, char *units, char *family, @@ -235,15 +236,15 @@ extern void ebpf_create_charts_on_apps(char *name, int update_every, char *module); -extern void write_end_chart(); +void write_end_chart(); -extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps); +void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps); -extern int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp); -extern int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp); -extern uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps); +int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp); +int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp); +uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps); -extern void ebpf_pid_file(char *filename, size_t length); +void ebpf_pid_file(char *filename, size_t length); #define EBPF_PROGRAMS_SECTION "ebpf programs" @@ -271,19 +272,23 @@ extern sem_t *shm_sem_ebpf_cgroup; extern pthread_mutex_t mutex_cgroup_shm; extern size_t all_pids_count; extern ebpf_plugin_stats_t plugin_statistics; +#ifdef LIBBPF_MAJOR_VERSION extern struct btf *default_btf; +#else +extern void *default_btf; +#endif // Socket functions and variables // Common functions -extern void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void 
ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root); -extern void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1); -extern collected_number get_value_from_structure(char *basis, size_t offset); -extern void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em); -extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, +void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr); +void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr); +void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root); +void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1); +collected_number get_value_from_structure(char *basis, size_t offset); +void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em); +void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, char *charttype, char *context, int order, int update_every); -extern void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end); +void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end); void ebpf_update_disabled_plugin_stats(ebpf_module_t *em); extern ebpf_filesystem_partitions_t localfs[]; extern ebpf_sync_syscalls_t local_syscalls[]; diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h index f65a137b5..0bea9122f 100644 --- a/collectors/ebpf.plugin/ebpf_apps.h +++ b/collectors/ebpf.plugin/ebpf_apps.h @@ -408,30 +408,30 @@ static inline void debug_log_int(const char *fmt, ...) 
// extern struct pid_stat **all_pids; -extern int ebpf_read_apps_groups_conf(struct target **apps_groups_default_target, +int ebpf_read_apps_groups_conf(struct target **apps_groups_default_target, struct target **apps_groups_root_target, const char *path, const char *file); -extern void clean_apps_groups_target(struct target *apps_groups_root_target); +void clean_apps_groups_target(struct target *apps_groups_root_target); -extern size_t zero_all_targets(struct target *root); +size_t zero_all_targets(struct target *root); -extern int am_i_running_as_root(); +int am_i_running_as_root(); -extern void cleanup_exited_pids(); +void cleanup_exited_pids(); -extern int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); +int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); -extern int get_pid_comm(pid_t pid, size_t n, char *dest); +int get_pid_comm(pid_t pid, size_t n, char *dest); -extern size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep, +size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep, int fd, struct pid_on_target *pids); -extern size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids); +size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids); -extern void collect_data_for_all_processes(int tbl_pid_stats_fd); +void collect_data_for_all_processes(int tbl_pid_stats_fd); extern ebpf_process_stat_t **global_process_stats; extern ebpf_process_publish_apps_t **current_apps_data; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index 14669bf68..4c410647d 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -15,11 +15,16 @@ netdata_cachestat_pid_t *cachestat_vector = NULL; static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END]; static netdata_idx_t *cachestat_values = NULL; -struct netdata_static_thread 
cachestat_threads = {"CACHESTAT KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL}; - -static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END, +struct netdata_static_thread cachestat_threads = {.name = "CACHESTAT KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL}; + +ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, @@ -39,7 +44,6 @@ struct config cachestat_config = { .first_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -static enum ebpf_threads_status ebpf_cachestat_exited = NETDATA_THREAD_EBPF_RUNNING; netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -67,6 +71,7 @@ static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false); } /* @@ -105,6 +110,7 @@ static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false); bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false); bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); } /* @@ 
-156,6 +162,9 @@ static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj) bpf_program__set_attach_target(obj->progs.netdata_mark_buffer_dirty_fentry, 0, cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name); + + bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0, + EBPF_COMMON_FNCT_CLEAN_UP); } /** @@ -210,6 +219,13 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj) if (ret) return -1; + obj->links.netdata_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_kprobe, + false, + EBPF_COMMON_FNCT_CLEAN_UP); + ret = libbpf_get_error(obj->links.netdata_release_task_kprobe); + if (ret) + return -1; + return 0; } @@ -242,6 +258,19 @@ static void ebpf_cachestat_set_hash_tables(struct cachestat_bpf *obj) } /** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_cachestat_disable_release_task(struct cachestat_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); +} + +/** * Load and attach * * Load and attach the eBPF code in kernel. @@ -266,13 +295,16 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf ebpf_cachestat_disable_specific_probe(obj); } + ebpf_cachestat_adjust_map_size(obj, em); + + if (!em->apps_charts && !em->cgroup_charts) + ebpf_cachestat_disable_release_task(obj); + int ret = cachestat_bpf__load(obj); if (ret) { return ret; } - ebpf_cachestat_adjust_map_size(obj, em); - ret = (test == EBPF_LOAD_TRAMPOLINE) ? 
cachestat_bpf__attach(obj) : ebpf_cachestat_attach_probe(obj); if (!ret) { ebpf_cachestat_set_hash_tables(obj); @@ -290,6 +322,38 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf *****************************************************************/ /** + * Cachestat Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_cachestat_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated); + + freez(cachestat_vector); + freez(cachestat_values); + freez(cachestat_threads.thread); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + cachestat_bpf__destroy(bpf_obj); +#endif + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * Cachestat exit. * * Cancel child and exit. 
@@ -299,12 +363,8 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf static void ebpf_cachestat_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_cachestat_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*cachestat_threads.thread); + ebpf_cachestat_free(em); } /** @@ -317,21 +377,7 @@ static void ebpf_cachestat_exit(void *ptr) static void ebpf_cachestat_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_cachestat_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated); - - freez(cachestat_vector); - freez(cachestat_values); - freez(cachestat_threads.thread); - -#ifdef LIBBPF_MAJOR_VERSION - if (bpf_obj) - cachestat_bpf__destroy(bpf_obj); -#endif - cachestat_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_cachestat_free(em); } /***************************************************************** @@ -649,17 +695,12 @@ void *ebpf_cachestat_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_every; - while (ebpf_cachestat_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_cachestat_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_cachestat_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -872,21 +913,16 @@ static void ebpf_create_systemd_cachestat_charts(int update_every) * Send Cache Stat charts * * Send collected data to Netdata. 
- * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_cachestat_charts() +static void ebpf_send_systemd_cachestat_charts() { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -913,8 +949,6 @@ static int ebpf_send_systemd_cachestat_charts() } } write_end_chart(); - - return ret; } /** @@ -1038,13 +1072,11 @@ void ebpf_cachestat_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_cachestat_charts(update_every); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_cachestat_charts(); + ebpf_send_systemd_cachestat_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -1227,7 +1259,8 @@ static void ebpf_cachestat_set_internal_value() static int ebpf_cachestat_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode); + if (em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { ret = -1; @@ -1267,16 +1300,13 @@ void *ebpf_cachestat_thread(void *ptr) ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em); - if (!em->enabled) - goto endcachestat; - ebpf_cachestat_set_internal_value(); #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if 
(ebpf_cachestat_load_bpf(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endcachestat; } @@ -1298,8 +1328,7 @@ void *ebpf_cachestat_thread(void *ptr) cachestat_collector(em); endcachestat: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h index fdd88464a..07f0745d4 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.h +++ b/collectors/ebpf.plugin/ebpf_cachestat.h @@ -81,9 +81,10 @@ typedef struct netdata_publish_cachestat { netdata_cachestat_pid_t prev; } netdata_publish_cachestat_t; -extern void *ebpf_cachestat_thread(void *ptr); +void *ebpf_cachestat_thread(void *ptr); extern struct config cachestat_config; extern netdata_ebpf_targets_t cachestat_targets[]; +extern ebpf_local_maps_t cachestat_maps[]; #endif // NETDATA_EBPF_CACHESTAT_H diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c index 24469c642..42c045368 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.c +++ b/collectors/ebpf.plugin/ebpf_cgroup.c @@ -6,6 +6,7 @@ #include "ebpf_cgroup.h" ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL; +int send_cgroup_chart = 0; // -------------------------------------------------------------------------------------------------------------------- // Map shared memory @@ -99,24 +100,6 @@ void ebpf_map_cgroup_shared_memory() // Close and Cleanup /** - * Close shared memory - */ -void ebpf_close_cgroup_shm() -{ - if (shm_sem_ebpf_cgroup != SEM_FAILED) { - sem_close(shm_sem_ebpf_cgroup); - sem_unlink(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME); - shm_sem_ebpf_cgroup = SEM_FAILED; - } - - if (shm_fd_ebpf_cgroup > 0) { - close(shm_fd_ebpf_cgroup); - shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); - shm_fd_ebpf_cgroup = -1; - } -} - -/** * Clean Specific cgroup pid * * Clean all PIDs associated with 
cgroup. @@ -259,7 +242,7 @@ static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path) * * Set variable remove. If this variable is not reset, the structure will be removed from link list. */ - void ebpf_reset_updated_var() +void ebpf_reset_updated_var() { ebpf_cgroup_target_t *ect; for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { @@ -274,6 +257,7 @@ static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path) */ void ebpf_parse_cgroup_shm_data() { + static int previous = 0; if (shm_ebpf_cgroup.header) { sem_wait(shm_sem_ebpf_cgroup); int i, end = shm_ebpf_cgroup.header->cgroup_root_count; @@ -291,6 +275,11 @@ void ebpf_parse_cgroup_shm_data() ebpf_update_pid_link_list(ect, ptr->path); } } + send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count; + previous = shm_ebpf_cgroup.header->cgroup_root_count; +#ifdef NETDATA_DEV_MODE + error("Updating cgroup %d (Previous: %d, Current: %d)", send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count); +#endif pthread_mutex_unlock(&mutex_cgroup_shm); sem_post(shm_sem_ebpf_cgroup); diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h index cca9a950d..19da7fca9 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.h +++ b/collectors/ebpf.plugin/ebpf_cgroup.h @@ -60,10 +60,10 @@ typedef struct ebpf_cgroup_target { struct ebpf_cgroup_target *next; } ebpf_cgroup_target_t; -extern void ebpf_map_cgroup_shared_memory(); -extern void ebpf_parse_cgroup_shm_data(); -extern void ebpf_close_cgroup_shm(); -extern void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order, +void ebpf_map_cgroup_shared_memory(); +void ebpf_parse_cgroup_shm_data(); +void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order, char *algorithm, char *context, char *module, int update_every); +extern int send_cgroup_chart; #endif /* NETDATA_EBPF_CGROUP_H */ 
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index 8cf063ca1..71169e153 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -20,11 +20,15 @@ struct config dcstat_config = { .first_section = NULL, .rwlock = AVL_LOCK_INITIALIZER } }; struct netdata_static_thread dcstat_threads = {"DCSTAT KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL}; -static enum ebpf_threads_status ebpf_dcstat_exited = NETDATA_THREAD_EBPF_RUNNING; - -static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL}; + +ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, @@ -65,6 +69,7 @@ static inline void ebpf_dc_disable_probes(struct dc_bpf *obj) { bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false); bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false); } /* @@ -78,6 +83,7 @@ static inline void ebpf_dc_disable_trampoline(struct dc_bpf *obj) { bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false); bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false); } /** @@ -94,6 +100,9 @@ static void ebpf_dc_set_trampoline_target(struct dc_bpf *obj) bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0, dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name); + + bpf_program__set_attach_target(obj->progs.netdata_dcstat_release_task_fentry, 0, + EBPF_COMMON_FNCT_CLEAN_UP); } /** @@ 
-125,6 +134,13 @@ static int ebpf_dc_attach_probes(struct dc_bpf *obj) if (ret) return -1; + obj->links.netdata_dcstat_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_dcstat_release_task_kprobe, + false, + EBPF_COMMON_FNCT_CLEAN_UP); + ret = libbpf_get_error(obj->links.netdata_dcstat_release_task_kprobe); + if (ret) + return -1; + return 0; } @@ -180,6 +196,19 @@ netdata_ebpf_program_loaded_t ebpf_dc_update_load(ebpf_module_t *em) } /** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_dc_disable_release_task(struct dc_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false); +} + +/** * Load and attach * * Load and attach the eBPF code in kernel. @@ -200,13 +229,16 @@ static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em) ebpf_dc_disable_trampoline(obj); } + ebpf_dc_adjust_map_size(obj, em); + + if (!em->apps_charts && !em->cgroup_charts) + ebpf_dc_disable_release_task(obj); + int ret = dc_bpf__load(obj); if (ret) { return ret; } - ebpf_dc_adjust_map_size(obj, em); - ret = (test == EBPF_LOAD_TRAMPOLINE) ? dc_bpf__attach(obj) : ebpf_dc_attach_probes(obj); if (!ret) { ebpf_dc_set_hash_tables(obj); @@ -262,33 +294,21 @@ void ebpf_dcstat_clean_names() } /** - * DCstat exit + * DCstat Free * - * Cancel child and exit. + * Cleanup variables after child threads to stop * * @param ptr thread data. 
*/ -static void ebpf_dcstat_exit(void *ptr) +static void ebpf_dcstat_free(ebpf_module_t *em ) { - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); return; } - - ebpf_dcstat_exited = NETDATA_THREAD_EBPF_STOPPING; -} - -/** - * Clean up the main thread. - * - * @param ptr thread data. - */ -static void ebpf_dcstat_cleanup(void *ptr) -{ - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_dcstat_exited != NETDATA_THREAD_EBPF_STOPPED) - return; + pthread_mutex_unlock(&ebpf_exit_cleanup); freez(dcstat_vector); freez(dcstat_values); @@ -303,8 +323,34 @@ static void ebpf_dcstat_cleanup(void *ptr) dc_bpf__destroy(bpf_obj); #endif - dcstat_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** + * DCstat exit + * + * Cancel child and exit. + * + * @param ptr thread data. + */ +static void ebpf_dcstat_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + netdata_thread_cancel(*dcstat_threads.thread); + ebpf_dcstat_free(em); +} + +/** + * Clean up the main thread. + * + * @param ptr thread data. 
+ */ +static void ebpf_dcstat_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + ebpf_dcstat_free(em); } /***************************************************************** @@ -531,17 +577,12 @@ void *ebpf_dcstat_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_every; - while (ebpf_dcstat_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_dcstat_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_dcstat_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -860,21 +901,16 @@ static void ebpf_create_systemd_dc_charts(int update_every) * Send Directory Cache charts * * Send collected data to Netdata. - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_dc_charts() +static void ebpf_send_systemd_dc_charts() { - int ret = 1; collected_number value; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -910,8 +946,6 @@ static int ebpf_send_systemd_dc_charts() } } write_end_chart(); - - return ret; } /** @@ -966,13 +1000,11 @@ void ebpf_dc_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_dc_charts(update_every); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_dc_charts(); + 
ebpf_send_systemd_dc_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -1116,7 +1148,8 @@ static void ebpf_dcstat_allocate_global_vectors(int apps) static int ebpf_dcstat_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode); + if (em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { ret = -1; @@ -1158,14 +1191,11 @@ void *ebpf_dcstat_thread(void *ptr) ebpf_update_names(dc_optional_name, em); - if (!em->enabled) - goto enddcstat; - #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_dcstat_load_bpf(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto enddcstat; } @@ -1188,8 +1218,7 @@ void *ebpf_dcstat_thread(void *ptr) dcstat_collector(em); enddcstat: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h index 5c4a80cd7..d8687f968 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.h +++ b/collectors/ebpf.plugin/ebpf_dcstat.h @@ -75,9 +75,10 @@ typedef struct netdata_publish_dcstat { netdata_dcstat_pid_t prev; } netdata_publish_dcstat_t; -extern void *ebpf_dcstat_thread(void *ptr); -extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_dcstat_thread(void *ptr); +void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr); extern struct config dcstat_config; extern netdata_ebpf_targets_t dc_targets[]; +extern ebpf_local_maps_t dcstat_maps[]; #endif // NETDATA_EBPF_DCSTAT_H diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c index 96b1705ce..a27bd81e3 100644 --- a/collectors/ebpf.plugin/ebpf_disk.c +++ 
b/collectors/ebpf.plugin/ebpf_disk.c @@ -33,10 +33,16 @@ static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; static netdata_idx_t *disk_hash_values = NULL; -static struct netdata_static_thread disk_threads = {"DISK KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_disk_exited = NETDATA_THREAD_EBPF_RUNNING; +static struct netdata_static_thread disk_threads = { + .name = "DISK KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; ebpf_publish_disk_t *plot_disks = NULL; pthread_mutex_t plot_mutex; @@ -424,6 +430,40 @@ static void ebpf_cleanup_disk_list() } /** + * DISK Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_disk_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + ebpf_disk_disable_tracepoints(); + + if (dimensions) + ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); + + freez(disk_hash_values); + freez(disk_threads.thread); + pthread_mutex_destroy(&plot_mutex); + + ebpf_cleanup_plot_disks(); + ebpf_cleanup_disk_list(); + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * Disk exit. * * Cancel child and exit. 
@@ -433,12 +473,8 @@ static void ebpf_cleanup_disk_list() static void ebpf_disk_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_disk_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*disk_threads.thread); + ebpf_disk_free(em); } /** @@ -451,23 +487,7 @@ static void ebpf_disk_exit(void *ptr) static void ebpf_disk_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_disk_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_disk_disable_tracepoints(); - - if (dimensions) - ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); - - freez(disk_hash_values); - freez(disk_threads.thread); - pthread_mutex_destroy(&plot_mutex); - - ebpf_cleanup_plot_disks(); - ebpf_cleanup_disk_list(); - - disk_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_disk_free(em); } /***************************************************************** @@ -590,17 +610,12 @@ void *ebpf_disk_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_DISK_SLEEP_MS * em->update_every; - while (ebpf_disk_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_disk_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd); } - ebpf_disk_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -805,29 +820,26 @@ void *ebpf_disk_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = disk_maps; - if (!em->enabled) - goto enddisk; - if (ebpf_disk_enable_tracepoints()) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto enddisk; } avl_init_lock(&disk_tree, ebpf_compare_disks); if (read_local_disks()) { - em->enabled = 
CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto enddisk; } if (pthread_mutex_init(&plot_mutex, NULL)) { - em->enabled = 0; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; error("Cannot initialize local mutex"); goto enddisk; } em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { - em->enabled = 0; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto enddisk; } @@ -845,8 +857,7 @@ void *ebpf_disk_thread(void *ptr) disk_collector(em); enddisk: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h index 8e58174b9..c14b887f8 100644 --- a/collectors/ebpf.plugin/ebpf_disk.h +++ b/collectors/ebpf.plugin/ebpf_disk.h @@ -72,7 +72,7 @@ typedef struct netdata_ebpf_publish_disk { extern struct config disk_config; -extern void *ebpf_disk_thread(void *ptr); +void *ebpf_disk_thread(void *ptr); #endif /* NETDATA_EBPF_DISK_H */ diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index b4e577dad..30b7f22ce 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -29,15 +29,301 @@ struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -struct netdata_static_thread fd_thread = {"FD KERNEL", NULL, NULL, 1, NULL, - NULL, NULL}; -static enum ebpf_threads_status ebpf_fd_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread fd_thread = {"FD KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL}; + static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER]; static netdata_idx_t *fd_values = NULL; netdata_fd_stat_t 
*fd_vector = NULL; netdata_fd_stat_t **fd_pid = NULL; +netdata_ebpf_targets_t fd_targets[] = { {.name = "open", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "close", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; + +#ifdef LIBBPF_MAJOR_VERSION +#include "includes/fd.skel.h" // BTF code + +static struct fd_bpf *bpf_obj = NULL; + +/** + * Disable probe + * + * Disable all probes to use exclusively another method. + * + * @param obj is the main structure for bpf objects +*/ +static inline void ebpf_fd_disable_probes(struct fd_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_sys_open_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_sys_open_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false); + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false); + } else { + bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false); + } +} + +/* + * Disable specific probe + * + * Disable probes according the kernel version + * + * @param obj is the main structure for bpf objects + */ +static inline void ebpf_disable_specific_probes(struct fd_bpf *obj) +{ + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false); + } else { + bpf_program__set_autoload(obj->progs.netdata_close_fd_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false); + } +} + +/* + * Disable trampoline + * + * Disable all trampoline to use exclusively another method. 
+ * + * @param obj is the main structure for bpf objects. + */ +static inline void ebpf_disable_trampoline(struct fd_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_sys_open_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_sys_open_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_fexit, false); + bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false); + bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false); +} + +/* + * Disable specific trampoline + * + * Disable trampoline according to kernel version. + * + * @param obj is the main structure for bpf objects. + */ +static inline void ebpf_disable_specific_trampoline(struct fd_bpf *obj) +{ + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false); + bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false); + } else { + bpf_program__set_autoload(obj->progs.netdata_close_fd_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_close_fd_fexit, false); + } +} + +/** + * Set trampoline target + * + * Set the targets we will monitor. + * + * @param obj is the main structure for bpf objects. 
+ */ +static void ebpf_set_trampoline_target(struct fd_bpf *obj) +{ + bpf_program__set_attach_target(obj->progs.netdata_sys_open_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name); + bpf_program__set_attach_target(obj->progs.netdata_sys_open_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name); + bpf_program__set_attach_target(obj->progs.netdata_release_task_fd_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP); + + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + bpf_program__set_attach_target( + obj->progs.netdata_close_fd_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + bpf_program__set_attach_target(obj->progs.netdata_close_fd_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + } else { + bpf_program__set_attach_target( + obj->progs.netdata___close_fd_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + bpf_program__set_attach_target( + obj->progs.netdata___close_fd_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + } +} + +/** + * Mount Attach Probe + * + * Attach probes to target + * + * @param obj is the main structure for bpf objects. + * + * @return It returns 0 on success and -1 otherwise. 
+ */ +static int ebpf_fd_attach_probe(struct fd_bpf *obj) +{ + obj->links.netdata_sys_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_sys_open_kprobe, false, + fd_targets[NETDATA_FD_SYSCALL_OPEN].name); + int ret = libbpf_get_error(obj->links.netdata_sys_open_kprobe); + if (ret) + return -1; + + obj->links.netdata_sys_open_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_sys_open_kretprobe, true, + fd_targets[NETDATA_FD_SYSCALL_OPEN].name); + ret = libbpf_get_error(obj->links.netdata_sys_open_kretprobe); + if (ret) + return -1; + + obj->links.netdata_release_task_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_fd_kprobe, + false, + EBPF_COMMON_FNCT_CLEAN_UP); + ret = libbpf_get_error(obj->links.netdata_release_task_fd_kprobe); + if (ret) + return -1; + + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + obj->links.netdata_close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kretprobe, true, + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + ret = libbpf_get_error(obj->links.netdata_close_fd_kretprobe); + if (ret) + return -1; + + obj->links.netdata_close_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kprobe, false, + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + ret = libbpf_get_error(obj->links.netdata_close_fd_kprobe); + if (ret) + return -1; + } else { + obj->links.netdata___close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata___close_fd_kretprobe, + true, + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + ret = libbpf_get_error(obj->links.netdata___close_fd_kretprobe); + if (ret) + return -1; + + obj->links.netdata___close_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata___close_fd_kprobe, + false, + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name); + ret = libbpf_get_error(obj->links.netdata___close_fd_kprobe); + if (ret) + return -1; + } + + return 0; +} + +/** + * Set target values + * + * Set pointers used to laod data. 
+ */ +static void ebpf_fd_set_target_values() +{ + static char *close_targets[] = {"close_fd", "__close_fd"}; + static char *open_targets[] = {"do_sys_openat2", "do_sys_open"}; + if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) { + fd_targets[NETDATA_FD_SYSCALL_OPEN].name = open_targets[0]; + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = close_targets[0]; + } else { + fd_targets[NETDATA_FD_SYSCALL_OPEN].name = open_targets[1]; + fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = close_targets[1]; + } +} + +/** + * Set hash tables + * + * Set the values for maps according the value given by kernel. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_fd_set_hash_tables(struct fd_bpf *obj) +{ + fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.tbl_fd_global); + fd_maps[NETDATA_FD_PID_STATS].map_fd = bpf_map__fd(obj->maps.tbl_fd_pid); + fd_maps[NETDATA_FD_CONTROLLER].map_fd = bpf_map__fd(obj->maps.fd_ctrl); +} + +/** + * Adjust Map Size + * + * Resize maps according input from users. + * + * @param obj is the main structure for bpf objects. + * @param em structure with configuration + */ +static void ebpf_fd_adjust_map_size(struct fd_bpf *obj, ebpf_module_t *em) +{ + ebpf_update_map_size(obj->maps.tbl_fd_pid, &fd_maps[NETDATA_FD_PID_STATS], + em, bpf_map__name(obj->maps.tbl_fd_pid)); +} + +/** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_fd_disable_release_task(struct fd_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false); +} + +/** + * Load and attach + * + * Load and attach the eBPF code in kernel. + * + * @param obj is the main structure for bpf objects. 
+ * @param em structure with configuration + * + * @return it returns 0 on succes and -1 otherwise + */ +static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em) +{ + netdata_ebpf_targets_t *mt = em->targets; + netdata_ebpf_program_loaded_t test = mt[NETDATA_FD_SYSCALL_OPEN].mode; + + ebpf_fd_set_target_values(); + if (test == EBPF_LOAD_TRAMPOLINE) { + ebpf_fd_disable_probes(obj); + ebpf_disable_specific_trampoline(obj); + + ebpf_set_trampoline_target(obj); + // TODO: Remove this in next PR, because this specific trampoline has an error. + bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false); + } else { + ebpf_disable_trampoline(obj); + ebpf_disable_specific_probes(obj); + } + + ebpf_fd_adjust_map_size(obj, em); + + if (!em->apps_charts && !em->cgroup_charts) + ebpf_fd_disable_release_task(obj); + + int ret = fd_bpf__load(obj); + if (ret) { + return ret; + } + + ret = (test == EBPF_LOAD_TRAMPOLINE) ? fd_bpf__attach(obj) : ebpf_fd_attach_probe(obj); + if (!ret) { + ebpf_fd_set_hash_tables(obj); + + ebpf_update_controller(fd_maps[NETDATA_CACHESTAT_CTRL].map_fd, em); + } + + return ret; +} +#endif + /***************************************************************** * * FUNCTIONS TO CLOSE THE THREAD @@ -45,6 +331,38 @@ netdata_fd_stat_t **fd_pid = NULL; *****************************************************************/ /** + * FD Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. 
+ */ +static void ebpf_fd_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + ebpf_cleanup_publish_syscall(fd_publish_aggregated); + freez(fd_thread.thread); + freez(fd_values); + freez(fd_vector); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + fd_bpf__destroy(bpf_obj); +#endif + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * FD Exit * * Cancel child thread and exit. @@ -54,12 +372,8 @@ netdata_fd_stat_t **fd_pid = NULL; static void ebpf_fd_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_fd_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*fd_thread.thread); + ebpf_fd_free(em); } /** @@ -70,16 +384,7 @@ static void ebpf_fd_exit(void *ptr) static void ebpf_fd_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_fd_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_cleanup_publish_syscall(fd_publish_aggregated); - freez(fd_thread.thread); - freez(fd_values); - freez(fd_vector); - - fd_thread.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_fd_free(em); } /***************************************************************** @@ -153,17 +458,12 @@ void *ebpf_fd_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_FD_SLEEP_MS * em->update_every; - while (ebpf_fd_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_fd_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_fd_exited = 
NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -549,20 +849,15 @@ static void ebpf_create_systemd_fd_charts(ebpf_module_t *em) * Send collected data to Netdata. * * @param em the main collector structure - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_fd_charts(ebpf_module_t *em) +static void ebpf_send_systemd_fd_charts(ebpf_module_t *em) { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -593,8 +888,6 @@ static int ebpf_send_systemd_fd_charts(ebpf_module_t *em) } write_end_chart(); } - - return ret; } /** @@ -615,13 +908,11 @@ static void ebpf_fd_send_cgroup_data(ebpf_module_t *em) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_fd_charts(em); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_fd_charts(em); + ebpf_send_systemd_fd_charts(em); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -810,6 +1101,40 @@ static void ebpf_fd_allocate_global_vectors(int apps) fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); } +/* + * Load BPF + * + * Load BPF files. 
+ * + * @param em the structure with configuration + */ +static int ebpf_fd_load_bpf(ebpf_module_t *em) +{ + int ret = 0; + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_FD_SYSCALL_OPEN].mode); + if (em->load & EBPF_LOAD_LEGACY) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { + em->enabled = CONFIG_BOOLEAN_NO; + ret = -1; + } + } +#ifdef LIBBPF_MAJOR_VERSION + else { + bpf_obj = fd_bpf__open(); + if (!bpf_obj) + ret = -1; + else + ret = ebpf_fd_load_and_attach(bpf_obj, em); + } +#endif + + if (ret) + error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->thread_name); + + return ret; +} + /** * Directory Cache thread * @@ -826,17 +1151,16 @@ void *ebpf_fd_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = fd_maps; - if (!em->enabled) +#ifdef LIBBPF_MAJOR_VERSION + ebpf_adjust_thread_load(em, default_btf); +#endif + if (ebpf_fd_load_bpf(em)) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endfd; + } ebpf_fd_allocate_global_vectors(em->apps_charts); - em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); - if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; - goto endfd; - } - int algorithms[NETDATA_FD_SYSCALL_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX }; @@ -852,8 +1176,7 @@ void *ebpf_fd_thread(void *ptr) fd_collector(em); endfd: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h index 8742558df..914a34b98 100644 --- a/collectors/ebpf.plugin/ebpf_fd.h +++ b/collectors/ebpf.plugin/ebpf_fd.h @@ -75,10 +75,11 @@ enum fd_syscalls { }; -extern void *ebpf_fd_thread(void *ptr); -extern void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_fd_thread(void *ptr); +void ebpf_fd_create_apps_charts(struct 
ebpf_module *em, void *ptr); extern struct config fd_config; extern netdata_fd_stat_t **fd_pid; +extern netdata_ebpf_targets_t fd_targets[]; #endif /* NETDATA_EBPF_FD_H */ diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c index bc767fbc9..7dbec7410 100644 --- a/collectors/ebpf.plugin/ebpf_filesystem.c +++ b/collectors/ebpf.plugin/ebpf_filesystem.c @@ -30,10 +30,16 @@ static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NET .type = NETDATA_EBPF_MAP_CONTROLLER, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; -struct netdata_static_thread filesystem_threads = {"EBPF FS READ", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_fs_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread filesystem_threads = { + .name = "EBPF FS READ", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; @@ -317,22 +323,41 @@ void ebpf_filesystem_cleanup_ebpf_data() freez(efp->hadditional.name); freez(efp->hadditional.title); - - struct bpf_link **probe_links = efp->probe_links; - size_t j = 0 ; - struct bpf_program *prog; - bpf_object__for_each_program(prog, efp->objects) { - bpf_link__destroy(probe_links[j]); - j++; - } - freez(probe_links); - if (efp->objects) - bpf_object__close(efp->objects); } } } /** + * Filesystem Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. 
+ */ +static void ebpf_filesystem_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(filesystem_threads.thread); + ebpf_cleanup_publish_syscall(filesystem_publish_aggregated); + + ebpf_filesystem_cleanup_ebpf_data(); + if (dimensions) + ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); + freez(filesystem_hash_values); + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * Filesystem exit * * Cancel child thread. @@ -342,12 +367,8 @@ void ebpf_filesystem_cleanup_ebpf_data() static void ebpf_filesystem_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_fs_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*filesystem_threads.thread); + ebpf_filesystem_free(em); } /** @@ -360,19 +381,7 @@ static void ebpf_filesystem_exit(void *ptr) static void ebpf_filesystem_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_fs_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(filesystem_threads.thread); - ebpf_cleanup_publish_syscall(filesystem_publish_aggregated); - - ebpf_filesystem_cleanup_ebpf_data(); - if (dimensions) - ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); - freez(filesystem_hash_values); - - filesystem_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_filesystem_free(em); } /***************************************************************** @@ -483,11 +492,8 @@ void *ebpf_filesystem_read_hash(void *ptr) heartbeat_init(&hb); usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_every; int 
update_every = em->update_every; - while (ebpf_fs_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_fs_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); (void) ebpf_update_partitions(em); ebpf_obsolete_fs_charts(update_every); @@ -499,8 +505,6 @@ void *ebpf_filesystem_read_hash(void *ptr) read_filesystem_tables(); } - ebpf_fs_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -603,9 +607,6 @@ void *ebpf_filesystem_thread(void *ptr) em->maps = fs_maps; ebpf_update_filesystem(); - if (!em->enabled) - goto endfilesystem; - // Initialize optional as zero, to identify when there are not partitions to monitor em->optional = 0; @@ -613,7 +614,7 @@ void *ebpf_filesystem_thread(void *ptr) if (em->optional) info("Netdata cannot monitor the filesystems used on this host."); - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endfilesystem; } @@ -630,8 +631,7 @@ void *ebpf_filesystem_thread(void *ptr) filesystem_collector(em); endfilesystem: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h index f6a10c874..0d558df7d 100644 --- a/collectors/ebpf.plugin/ebpf_filesystem.h +++ b/collectors/ebpf.plugin/ebpf_filesystem.h @@ -43,7 +43,7 @@ enum netdata_filesystem_table { NETDATA_ADDR_FS_TABLE }; -extern void *ebpf_filesystem_thread(void *ptr); +void *ebpf_filesystem_thread(void *ptr); extern struct config fs_config; #endif /* NETDATA_EBPF_FILESYSTEM_H */ diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c index 41a881647..b07dd24ca 100644 --- a/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/collectors/ebpf.plugin/ebpf_hardirq.c @@ -135,10 +135,43 @@ static 
hardirq_ebpf_val_t *hardirq_ebpf_vals = NULL; // tmp store for static hard IRQ values we get from a per-CPU eBPF map. static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL; -static struct netdata_static_thread hardirq_threads = {"HARDIRQ KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_hardirq_exited = NETDATA_THREAD_EBPF_RUNNING; +static struct netdata_static_thread hardirq_threads = { + .name = "HARDIRQ KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; + +/** + * Hardirq Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_hardirq_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(hardirq_threads.thread); + for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&hardirq_tracepoints[i]); + } + freez(hardirq_ebpf_vals); + freez(hardirq_ebpf_static_vals); + + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; +} /** * Hardirq Exit @@ -150,12 +183,8 @@ static enum ebpf_threads_status ebpf_hardirq_exited = NETDATA_THREAD_EBPF_RUNNIN static void hardirq_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_hardirq_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*hardirq_threads.thread); + ebpf_hardirq_free(em); } /** @@ -168,19 +197,7 @@ static void hardirq_exit(void *ptr) static void hardirq_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - - if (ebpf_hardirq_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(hardirq_threads.thread); - for (int i 
= 0; hardirq_tracepoints[i].class != NULL; i++) { - ebpf_disable_tracepoint(&hardirq_tracepoints[i]); - } - freez(hardirq_ebpf_vals); - freez(hardirq_ebpf_static_vals); - - hardirq_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_hardirq_free(em); } /***************************************************************** @@ -323,16 +340,12 @@ static void *hardirq_reader(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_HARDIRQ_SLEEP_MS * em->update_every; - while (ebpf_hardirq_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - UNUSED(dt); - if (ebpf_hardirq_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd); hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd); } - ebpf_hardirq_exited = NETDATA_THREAD_EBPF_STOPPED; netdata_thread_cleanup_pop(1); return NULL; @@ -472,26 +485,21 @@ void *ebpf_hardirq_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = hardirq_maps; - if (!em->enabled) { - goto endhardirq; - } - if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endhardirq; } em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endhardirq; } hardirq_collector(em); endhardirq: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h index 4c8a7a098..381da57d8 100644 --- a/collectors/ebpf.plugin/ebpf_hardirq.h +++ b/collectors/ebpf.plugin/ebpf_hardirq.h @@ -68,6 +68,6 @@ typedef 
struct hardirq_static_val { } hardirq_static_val_t; extern struct config hardirq_config; -extern void *ebpf_hardirq_thread(void *ptr); +void *ebpf_hardirq_thread(void *ptr); #endif /* NETDATA_EBPF_HARDIRQ_H */ diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c index 4dca04505..dc805da23 100644 --- a/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/collectors/ebpf.plugin/ebpf_mdflush.c @@ -35,10 +35,39 @@ static avl_tree_lock mdflush_pub; // tmp store for mdflush values we get from a per-CPU eBPF map. static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL; -static struct netdata_static_thread mdflush_threads = {"MDFLUSH KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_mdflush_exited = NETDATA_THREAD_EBPF_RUNNING; +static struct netdata_static_thread mdflush_threads = { + .name = "MDFLUSH KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; + +/** + * MDflush Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. 
+ */ +static void ebpf_mdflush_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(mdflush_ebpf_vals); + freez(mdflush_threads.thread); + + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; +} /** * MDflush exit @@ -50,12 +79,7 @@ static enum ebpf_threads_status ebpf_mdflush_exited = NETDATA_THREAD_EBPF_RUNNIN static void mdflush_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_mdflush_exited = NETDATA_THREAD_EBPF_STOPPING; + ebpf_mdflush_free(em); } /** @@ -68,14 +92,8 @@ static void mdflush_exit(void *ptr) static void mdflush_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_mdflush_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(mdflush_ebpf_vals); - freez(mdflush_threads.thread); - - mdflush_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + netdata_thread_cancel(*mdflush_threads.thread); + ebpf_mdflush_free(em); } /** @@ -182,17 +200,12 @@ static void *mdflush_reader(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_MDFLUSH_SLEEP_MS * em->update_every; - while (ebpf_mdflush_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - UNUSED(dt); - if (ebpf_mdflush_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); mdflush_read_count_map(); } - ebpf_mdflush_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -295,26 +308,25 @@ void *ebpf_mdflush_thread(void *ptr) char *md_flush_request = ebpf_find_symbol("md_flush_request"); if (!md_flush_request) { - em->enabled = CONFIG_BOOLEAN_NO; + 
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; error("Cannot monitor MD devices, because md is not loaded."); } freez(md_flush_request); - if (!em->enabled) { + if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) { goto endmdflush; } em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; + em->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endmdflush; } mdflush_collector(em); endmdflush: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/collectors/ebpf.plugin/ebpf_mdflush.h index 59856ad67..b04eefd28 100644 --- a/collectors/ebpf.plugin/ebpf_mdflush.h +++ b/collectors/ebpf.plugin/ebpf_mdflush.h @@ -35,7 +35,7 @@ typedef struct netdata_mdflush { uint64_t cnt; } netdata_mdflush_t; -extern void *ebpf_mdflush_thread(void *ptr); +void *ebpf_mdflush_thread(void *ptr); extern struct config mdflush_config; diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c index bca467bce..ec1f07a65 100644 --- a/collectors/ebpf.plugin/ebpf_mount.c +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -22,14 +22,20 @@ static netdata_idx_t *mount_values = NULL; static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END]; -struct netdata_static_thread mount_thread = {"MOUNT KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL}; +struct netdata_static_thread mount_thread = { + .name = "MOUNT KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "umount", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; -static enum ebpf_threads_status ebpf_mount_exited = NETDATA_THREAD_EBPF_RUNNING; #ifdef 
LIBBPF_MAJOR_VERSION #include "includes/mount.skel.h" // BTF code @@ -224,6 +230,36 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_ *****************************************************************/ /** + * Mount Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_mount_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(mount_thread.thread); + freez(mount_values); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + mount_bpf__destroy(bpf_obj); +#endif + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * Mount Exit * * Cancel child thread. @@ -233,12 +269,8 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_ static void ebpf_mount_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_mount_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*mount_thread.thread); + ebpf_mount_free(em); } /** @@ -251,19 +283,7 @@ static void ebpf_mount_exit(void *ptr) static void ebpf_mount_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_mount_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(mount_thread.thread); - freez(mount_values); - -#ifdef LIBBPF_MAJOR_VERSION - if (bpf_obj) - mount_bpf__destroy(bpf_obj); -#endif - - mount_thread.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_mount_free(em); } /***************************************************************** @@ -317,17 +337,12 @@ void *ebpf_mount_read_hash(void *ptr) usec_t step = 
NETDATA_LATENCY_MOUNT_SLEEP_MS * em->update_every; //This will be cancelled by its parent - while (ebpf_mount_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_mount_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_mount_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -435,7 +450,7 @@ static void ebpf_create_mount_charts(int update_every) static int ebpf_mount_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + if (em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; @@ -474,14 +489,11 @@ void *ebpf_mount_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = mount_maps; - if (!em->enabled) - goto endmount; - #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_mount_load_bpf(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endmount; } @@ -498,8 +510,7 @@ void *ebpf_mount_thread(void *ptr) mount_collector(em); endmount: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/collectors/ebpf.plugin/ebpf_mount.h index d4f48efd1..5a8d11a59 100644 --- a/collectors/ebpf.plugin/ebpf_mount.h +++ b/collectors/ebpf.plugin/ebpf_mount.h @@ -38,7 +38,7 @@ enum netdata_mount_syscalls { }; extern struct config mount_config; -extern void *ebpf_mount_thread(void *ptr); +void *ebpf_mount_thread(void *ptr); extern netdata_ebpf_targets_t mount_targets[]; #endif /* NETDATA_EBPF_MOUNT_H */ diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 
33f505b0e..d93e4159e 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -46,8 +46,7 @@ static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill" static void oomkill_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - - em->enabled = NETDATA_MAIN_THREAD_EXITED; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; } static void oomkill_write_data(int32_t *keys, uint32_t total) @@ -132,25 +131,18 @@ static void ebpf_create_systemd_oomkill_charts(int update_every) * Send Systemd charts * * Send collected data to Netdata. - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_oomkill_charts() +static void ebpf_send_systemd_oomkill_charts() { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->oomkill); ect->oomkill = 0; - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); - - return ret; } /* @@ -199,12 +191,10 @@ void ebpf_oomkill_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_oomkill_charts(update_every); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_oomkill_charts(); + ebpf_send_systemd_oomkill_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -367,27 +357,33 @@ void *ebpf_oomkill_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = oomkill_maps; +#define NETDATA_DEFAULT_OOM_DISABLED_MSG "Disabling OOMKILL thread, because" if (unlikely(!all_pids || !em->apps_charts)) { // 
When we are not running integration with apps, we won't fill necessary variables for this thread to run, so // we need to disable it. - if (em->enabled) - info("Disabling OOMKILL thread, because apps integration is completely disabled."); + if (em->thread->enabled) + info("%s apps integration is completely disabled.", NETDATA_DEFAULT_OOM_DISABLED_MSG); + + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + } else if (running_on_kernel < NETDATA_EBPF_KERNEL_4_14) { + if (em->thread->enabled) + info("%s kernel does not have necessary tracepoints.", NETDATA_DEFAULT_OOM_DISABLED_MSG); - em->enabled = 0; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; } - if (!em->enabled) { + if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) { goto endoomkill; } if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endoomkill; } em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endoomkill; } @@ -398,8 +394,7 @@ void *ebpf_oomkill_thread(void *ptr) oomkill_collector(em); endoomkill: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/collectors/ebpf.plugin/ebpf_oomkill.h index b5f04c74c..786086384 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.h +++ b/collectors/ebpf.plugin/ebpf_oomkill.h @@ -26,7 +26,7 @@ typedef uint8_t oomkill_ebpf_val_t; #define NETDATA_CGROUP_OOMKILLS_CONTEXT "cgroup.oomkills" extern struct config oomkill_config; -extern void *ebpf_oomkill_thread(void *ptr); -extern void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_oomkill_thread(void *ptr); +void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr); #endif /* 
NETDATA_EBPF_OOMKILL_H */ diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index f6b379a51..682577da7 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -46,6 +46,7 @@ ebpf_process_stat_t **global_process_stats = NULL; ebpf_process_publish_apps_t **current_apps_data = NULL; int process_enabled = 0; +bool publish_internal_metrics = true; struct config process_config = { .first_section = NULL, .last_section = NULL, @@ -53,13 +54,20 @@ struct config process_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -static struct netdata_static_thread cgroup_thread = {"EBPF CGROUP", NULL, NULL, - 1, NULL, NULL, NULL}; -static enum ebpf_threads_status ebpf_process_exited = NETDATA_THREAD_EBPF_RUNNING; - static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"}; static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"}; +static struct netdata_static_thread cgroup_thread = { + .name = "EBPF CGROUP", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; + /***************************************************************** * * PROCESS DATA AND SEND TO NETDATA @@ -319,6 +327,55 @@ static void ebpf_process_update_apps_data() } /** + * Cgroup Exit + * + * Function used with netdata_thread_clean_push + * + * @param ptr unused argument + */ +static void ebpf_cgroup_exit(void *ptr) +{ + UNUSED(ptr); +} + +/** + * Cgroup update shm + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data from shared memory. + * + * @param ptr It is a NULL value for this thread. + * + * @return It always returns NULL. 
+ */ +void *ebpf_cgroup_update_shm(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr); + heartbeat_t hb; + heartbeat_init(&hb); + + usec_t step = 3 * USEC_PER_SEC; + int counter = NETDATA_EBPF_CGROUP_UPDATE - 1; + //This will be cancelled by its parent + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + + // We are using a small heartbeat time to wake up thread, + // but we should not update so frequently the shared memory data + if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) { + counter = 0; + if (!shm_ebpf_cgroup.header) + ebpf_map_cgroup_shared_memory(); + + ebpf_parse_cgroup_shm_data(); + } + } + + netdata_thread_cleanup_pop(1); + return NULL; +} + +/** * Update cgroup * * Update cgroup data based in @@ -493,6 +550,21 @@ static inline void ebpf_create_statistic_load_chart(ebpf_module_t *em) } /** + * Update Internal Metric variable + * + * By default eBPF.plugin sends internal metrics for netdata, but user can + * disable this. + * + * The function updates the variable used to send charts. + */ +static void update_internal_metric_variable() +{ + const char *s = getenv("NETDATA_INTERNALS_MONITORING"); + if (s && *s && strcmp(s, "NO") == 0) + publish_internal_metrics = false; +} + +/** * Create Statistics Charts * * Create charts that will show statistics related to eBPF plugin. @@ -501,6 +573,10 @@ static inline void ebpf_create_statistic_load_chart(ebpf_module_t *em) */ static void ebpf_create_statistic_charts(ebpf_module_t *em) { + update_internal_metric_variable(); + if (!publish_internal_metrics) + return; + ebpf_create_statistic_thread_chart(em); ebpf_create_statistic_load_chart(em); @@ -659,31 +735,17 @@ static void ebpf_process_disable_tracepoints() */ static void ebpf_process_exit(void *ptr) { - (void)ptr; - ebpf_process_exited = NETDATA_THREAD_EBPF_STOPPING; -} - -/** - * Process cleanup - * - * Cleanup allocated memory. - * - * @param ptr thread data. 
- */ -static void ebpf_process_cleanup(void *ptr) -{ ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_process_exited != NETDATA_THREAD_EBPF_STOPPED) - return; ebpf_cleanup_publish_syscall(process_publish_aggregated); freez(process_hash_values); - freez(cgroup_thread.thread); ebpf_process_disable_tracepoints(); - cgroup_thread.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); + pthread_cancel(*cgroup_thread.thread); } /***************************************************************** @@ -692,47 +754,6 @@ static void ebpf_process_cleanup(void *ptr) * *****************************************************************/ -/** - * Cgroup update shm - * - * This is the thread callback. - * This thread is necessary, because we cannot freeze the whole plugin to read the data from shared memory. - * - * @param ptr It is a NULL value for this thread. - * - * @return It always returns NULL. 
- */ -void *ebpf_cgroup_update_shm(void *ptr) -{ - netdata_thread_cleanup_push(ebpf_process_cleanup, ptr); - heartbeat_t hb; - heartbeat_init(&hb); - - usec_t step = 3 * USEC_PER_SEC; - int counter = NETDATA_EBPF_CGROUP_UPDATE - 1; - //This will be cancelled by its parent - while (ebpf_process_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_process_exited == NETDATA_THREAD_EBPF_STOPPING) - break; - - // We are using a small heartbeat time to wake up thread, - // but we should not update so frequently the shared memory data - if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) { - counter = 0; - if (!shm_ebpf_cgroup.header) - ebpf_map_cgroup_shared_memory(); - - ebpf_parse_cgroup_shm_data(); - } - } - - ebpf_process_exited = NETDATA_THREAD_EBPF_STOPPED; - - netdata_thread_cleanup_pop(1); - return NULL; -} /** * Sum PIDs @@ -945,20 +966,15 @@ static void ebpf_create_systemd_process_charts(ebpf_module_t *em) * Send collected data to Netdata. 
* * @param em the structure with thread information - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_process_charts(ebpf_module_t *em) +static void ebpf_send_systemd_process_charts(ebpf_module_t *em) { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -995,8 +1011,6 @@ static int ebpf_send_systemd_process_charts(ebpf_module_t *em) } write_end_chart(); } - - return ret; } /** @@ -1018,13 +1032,11 @@ static void ebpf_process_send_cgroup_data(ebpf_module_t *em) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_chart = 0; - if (!systemd_chart) { + if (send_cgroup_chart) { ebpf_create_systemd_process_charts(em); - systemd_chart = 1; } - systemd_chart = ebpf_send_systemd_process_charts(em); + ebpf_send_systemd_process_charts(em); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -1071,6 +1083,9 @@ void ebpf_process_update_cgroup_algorithm() */ void ebpf_send_statistic_data() { + if (!publish_internal_metrics) + return; + write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS); write_chart_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_TOTAL], (long long)plugin_statistics.threads); write_chart_dimension(threads_stat[NETDATA_EBPF_THREAD_STAT_RUNNING], (long long)plugin_statistics.running); @@ -1089,11 +1104,12 @@ void ebpf_send_statistic_data() */ static void process_collector(ebpf_module_t *em) { + // Start cgroup integration before other threads cgroup_thread.thread = mallocz(sizeof(netdata_thread_t)); 
cgroup_thread.start_routine = ebpf_cgroup_update_shm; netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, NETDATA_THREAD_OPTION_DEFAULT, - ebpf_cgroup_update_shm, em); + ebpf_cgroup_update_shm, NULL); heartbeat_t hb; heartbeat_init(&hb); @@ -1119,8 +1135,6 @@ static void process_collector(ebpf_module_t *em) update_apps_list = 0; cleanup_exited_pids(); collect_data_for_all_processes(pid_fd); - - ebpf_create_apps_charts(apps_groups_root_target); } pthread_mutex_unlock(&collect_data_mutex); @@ -1131,6 +1145,8 @@ static void process_collector(ebpf_module_t *em) netdata_apps_integration_flags_t apps_enabled = em->apps_charts; pthread_mutex_lock(&collect_data_mutex); + + ebpf_create_apps_charts(apps_groups_root_target); if (all_pids_count > 0) { if (apps_enabled) { ebpf_process_update_apps_data(); @@ -1211,34 +1227,6 @@ static void set_local_pointers() *****************************************************************/ /** - * - */ -static void wait_for_all_threads_die() -{ - ebpf_modules[EBPF_MODULE_PROCESS_IDX].enabled = 0; - - heartbeat_t hb; - heartbeat_init(&hb); - - int max = 10; - int i; - for (i = 0; i < max; i++) { - heartbeat_next(&hb, 200000); - - size_t j, counter = 0, compare = 0; - for (j = 0; ebpf_modules[j].thread_name; j++) { - if (!ebpf_modules[j].enabled) - counter++; - - compare++; - } - - if (counter == compare) - break; - } -} - -/** * Enable tracepoints * * Enable necessary tracepoints for thread. 
@@ -1334,7 +1322,6 @@ endprocess: if (!em->enabled) ebpf_update_disabled_plugin_stats(em); - wait_for_all_threads_die(); netdata_thread_cleanup_pop(1); return NULL; } diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index bd928cbdc..f81287d82 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -34,9 +34,16 @@ static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = NULL, .internal_input = 0, .user_input = 0}}; -struct netdata_static_thread shm_threads = {"SHM KERNEL", NULL, NULL, 1, - NULL, NULL, NULL}; -static enum ebpf_threads_status ebpf_shm_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread shm_threads = { + .name = "SHM KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "shmat", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -83,6 +90,7 @@ static void ebpf_disable_probe(struct shm_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_shmat_probe, false); bpf_program__set_autoload(obj->progs.netdata_shmdt_probe, false); bpf_program__set_autoload(obj->progs.netdata_shmctl_probe, false); + bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false); } /* @@ -98,6 +106,7 @@ static void ebpf_disable_trampoline(struct shm_bpf *obj) bpf_program__set_autoload(obj->progs.netdata_shmat_fentry, false); bpf_program__set_autoload(obj->progs.netdata_shmdt_fentry, false); bpf_program__set_autoload(obj->progs.netdata_shmctl_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false); } /** @@ -130,6 +139,9 @@ static void ebpf_set_trampoline_target(struct shm_bpf *obj) shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel); 
bpf_program__set_attach_target(obj->progs.netdata_shmctl_fentry, 0, syscall); + + bpf_program__set_attach_target(obj->progs.netdata_shm_release_task_fentry, 0, + EBPF_COMMON_FNCT_CLEAN_UP); } /** @@ -177,6 +189,13 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj) if (ret) return -1; + obj->links.netdata_shm_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_shm_release_task_probe, + false, EBPF_COMMON_FNCT_CLEAN_UP); + ret = (int)libbpf_get_error(obj->links.netdata_shm_release_task_probe); + if (ret) + return -1; + + return 0; } @@ -193,6 +212,33 @@ static void ebpf_shm_set_hash_tables(struct shm_bpf *obj) } /** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_shm_disable_release_task(struct shm_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false); + bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false); +} + +/** + * Adjust Map Size + * + * Resize maps according input from users. + * + * @param obj is the main structure for bpf objects. + * @param em structure with configuration + */ +static void ebpf_shm_adjust_map_size(struct shm_bpf *obj, ebpf_module_t *em) +{ + ebpf_update_map_size(obj->maps.tbl_pid_shm, &shm_maps[NETDATA_PID_SHM_TABLE], + em, bpf_map__name(obj->maps.tbl_pid_shm)); +} + +/** * Load and attach * * Load and attach the eBPF code in kernel. 
@@ -221,6 +267,10 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e ebpf_disable_trampoline(obj); } + ebpf_shm_adjust_map_size(obj, em); + if (!em->apps_charts && !em->cgroup_charts) + ebpf_shm_disable_release_task(obj); + int ret = shm_bpf__load(obj); if (!ret) { if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE) @@ -240,6 +290,36 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e *****************************************************************/ /** + * SHM Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_shm_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + ebpf_cleanup_publish_syscall(shm_publish_aggregated); + + freez(shm_vector); + freez(shm_values); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + shm_bpf__destroy(bpf_obj); +#endif + + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; +} + +/** * SHM Exit * * Cancel child thread. 
@@ -249,12 +329,8 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e static void ebpf_shm_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_shm_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*shm_threads.thread); + ebpf_shm_free(em); } /** @@ -267,21 +343,7 @@ static void ebpf_shm_exit(void *ptr) static void ebpf_shm_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_shm_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_cleanup_publish_syscall(shm_publish_aggregated); - - freez(shm_vector); - freez(shm_values); - -#ifdef LIBBPF_MAJOR_VERSION - if (bpf_obj) - shm_bpf__destroy(bpf_obj); -#endif - - shm_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_shm_free(em); } /***************************************************************** @@ -463,17 +525,12 @@ void *ebpf_shm_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SHM_SLEEP_MS * em->update_every; - while (ebpf_shm_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_shm_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_shm_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -721,20 +778,15 @@ static void ebpf_create_systemd_shm_charts(int update_every) * Send Systemd charts * * Send collected data to Netdata. 
- * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_shm_charts() +static void ebpf_send_systemd_shm_charts() { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART); for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.get); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -761,8 +813,6 @@ static int ebpf_send_systemd_shm_charts() } } write_end_chart(); - - return ret; } /* @@ -810,13 +860,11 @@ void ebpf_shm_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_shm_charts(update_every); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_shm_charts(); + ebpf_send_systemd_shm_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -1008,7 +1056,9 @@ static void ebpf_create_shm_charts(int update_every) static int ebpf_shm_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SHMGET_CALL].mode); + if (em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; @@ -1047,15 +1097,11 @@ void *ebpf_shm_thread(void *ptr) ebpf_update_pid_table(&shm_maps[NETDATA_PID_SHM_TABLE], em); - if (!em->enabled) { - goto endshm; - } - #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_shm_load_bpf(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endshm; } @@ 
-1084,8 +1130,7 @@ void *ebpf_shm_thread(void *ptr) shm_collector(em); endshm: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h index 8e118a6f5..4e068819b 100644 --- a/collectors/ebpf.plugin/ebpf_shm.h +++ b/collectors/ebpf.plugin/ebpf_shm.h @@ -54,8 +54,8 @@ enum shm_counters { extern netdata_publish_shm_t **shm_pid; -extern void *ebpf_shm_thread(void *ptr); -extern void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_shm_thread(void *ptr); +void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr); extern netdata_ebpf_targets_t shm_targets[]; extern struct config shm_config; diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index ba63934d5..3a023e4a4 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -23,7 +23,7 @@ static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_cleanup_rbuf", static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth", .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, - .type = NETDATA_EBPF_MAP_STATIC, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = "tbl_global_sock", .internal_input = NETDATA_SOCKET_COUNTER, @@ -87,10 +87,16 @@ netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = {.name = "tcp_v6_connect", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; -struct netdata_static_thread socket_threads = {"EBPF SOCKET READ", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_socket_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread socket_threads = { + .name = "EBPF SOCKET READ", + .config_section = NULL, + 
.config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; #ifdef LIBBPF_MAJOR_VERSION #include "includes/socket.skel.h" // BTF code @@ -581,35 +587,21 @@ static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) } /** - * Socket exit + * Socket Free * - * Clean up the main thread. + * Cleanup variables after child threads to stop * * @param ptr thread data. */ -static void ebpf_socket_exit(void *ptr) +static void ebpf_socket_free(ebpf_module_t *em ) { - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); return; } - - ebpf_socket_exited = NETDATA_THREAD_EBPF_STOPPING; -} - -/** - * Socket cleanup - * - * Clean up allocated addresses. - * - * @param ptr thread data. - */ -void ebpf_socket_cleanup(void *ptr) -{ - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_socket_exited != NETDATA_THREAD_EBPF_STOPPED) - return; + pthread_mutex_unlock(&ebpf_exit_cleanup); ebpf_cleanup_publish_syscall(socket_publish_aggregated); freez(socket_hash_values); @@ -639,8 +631,37 @@ void ebpf_socket_cleanup(void *ptr) if (bpf_obj) socket_bpf__destroy(bpf_obj); #endif - socket_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** + * Socket exit + * + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void ebpf_socket_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + netdata_thread_cancel(*socket_threads.thread); + ebpf_socket_free(em); +} + +/** + * Socket cleanup + * + * Clean up allocated addresses. + * + * @param ptr thread data. 
+ */ +void ebpf_socket_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + ebpf_socket_free(em); } /***************************************************************** @@ -2146,11 +2167,8 @@ void *ebpf_socket_read_hash(void *ptr) int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd; int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd; int network_connection = em->optional; - while (ebpf_socket_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_socket_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); pthread_mutex_lock(&nv_mutex); read_listen_table(); @@ -2160,8 +2178,6 @@ void *ebpf_socket_read_hash(void *ptr) pthread_mutex_unlock(&nv_mutex); } - ebpf_socket_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -2702,20 +2718,15 @@ static void ebpf_create_systemd_socket_charts(int update_every) * Send Systemd charts * * Send collected data to Netdata. 
- * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_socket_charts() +static void ebpf_send_systemd_socket_charts() { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v4_connection); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -2723,8 +2734,7 @@ static int ebpf_send_systemd_socket_charts() for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v6_connection); - } else - ret = 0; + } } write_end_chart(); @@ -2732,8 +2742,7 @@ static int ebpf_send_systemd_socket_charts() for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent); - } else - ret = 0; + } } write_end_chart(); @@ -2784,8 +2793,6 @@ static int ebpf_send_systemd_socket_charts() } } write_end_chart(); - - return ret; } /** @@ -2821,12 +2828,10 @@ static void ebpf_socket_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_socket_charts(update_every); - systemd_charts = 1; } - systemd_charts = ebpf_send_systemd_socket_charts(); + ebpf_send_systemd_socket_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -3875,7 +3880,7 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + if 
(em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { ret = -1; @@ -3923,11 +3928,8 @@ void *ebpf_socket_thread(void *ptr) parse_service_name_section(&socket_config); parse_table_size_options(&socket_config); - if (!em->enabled) - goto endsocket; - if (pthread_mutex_init(&nv_mutex, NULL)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; error("Cannot initialize local mutex"); goto endsocket; } @@ -3967,8 +3969,7 @@ void *ebpf_socket_thread(void *ptr) socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em); endsocket: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h index 711225acf..ca6b193f0 100644 --- a/collectors/ebpf.plugin/ebpf_socket.h +++ b/collectors/ebpf.plugin/ebpf_socket.h @@ -356,12 +356,12 @@ typedef struct netdata_vector_plot { } netdata_vector_plot_t; -extern void clean_port_structure(ebpf_network_viewer_port_list_t **clean); +void clean_port_structure(ebpf_network_viewer_port_list_t **clean); extern ebpf_network_viewer_port_list_t *listen_ports; -extern void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values); -extern void parse_network_viewer_section(struct config *cfg); -extern void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); -extern void parse_service_name_section(struct config *cfg); +void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values); +void parse_network_viewer_section(struct config *cfg); +void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); +void parse_service_name_section(struct config *cfg); extern ebpf_socket_publish_apps_t 
**socket_bandwidth_curr; extern struct config socket_config; diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c index ed13f027f..3b5d15921 100644 --- a/collectors/ebpf.plugin/ebpf_softirq.c +++ b/collectors/ebpf.plugin/ebpf_softirq.c @@ -54,10 +54,45 @@ static softirq_val_t softirq_vals[] = { // tmp store for soft IRQ values we get from a per-CPU eBPF map. static softirq_ebpf_val_t *softirq_ebpf_vals = NULL; -static struct netdata_static_thread softirq_threads = {"SOFTIRQ KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL }; -static enum ebpf_threads_status ebpf_softirq_exited = NETDATA_THREAD_EBPF_RUNNING; +static struct netdata_static_thread softirq_threads = { + .name = "SOFTIRQ KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; + +/** + * Softirq Free + * + * Cleanup variables after child threads to stop + * + * @param em thread data. + */ +static void ebpf_softirq_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(softirq_threads.thread); + + for (int i = 0; softirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&softirq_tracepoints[i]); + } + freez(softirq_ebpf_vals); + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} /** * Exit @@ -69,12 +104,8 @@ static enum ebpf_threads_status ebpf_softirq_exited = NETDATA_THREAD_EBPF_RUNNIN static void softirq_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_softirq_exited = NETDATA_THREAD_EBPF_STOPPING; +
netdata_thread_cancel(*softirq_threads.thread); + ebpf_softirq_free(em); } /** @@ -87,18 +118,7 @@ static void softirq_exit(void *ptr) static void softirq_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_softirq_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(softirq_threads.thread); - - for (int i = 0; softirq_tracepoints[i].class != NULL; i++) { - ebpf_disable_tracepoint(&softirq_tracepoints[i]); - } - freez(softirq_ebpf_vals); - - softirq_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_softirq_free(em); } /***************************************************************** @@ -138,15 +158,11 @@ static void *softirq_reader(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SOFTIRQ_SLEEP_MS * em->update_every; - while (ebpf_softirq_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - UNUSED(dt); - if (ebpf_softirq_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); softirq_read_latency_map(); } - ebpf_softirq_exited = NETDATA_THREAD_EBPF_STOPPED; netdata_thread_cleanup_pop(1); return NULL; @@ -252,26 +268,21 @@ void *ebpf_softirq_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = softirq_maps; - if (!em->enabled) { - goto endsoftirq; - } - if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endsoftirq; } em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endsoftirq; } softirq_collector(em); endsoftirq: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_softirq.h 
b/collectors/ebpf.plugin/ebpf_softirq.h index a22751895..7dcddbb49 100644 --- a/collectors/ebpf.plugin/ebpf_softirq.h +++ b/collectors/ebpf.plugin/ebpf_softirq.h @@ -29,6 +29,6 @@ typedef struct sofirq_val { } softirq_val_t; extern struct config softirq_config; -extern void *ebpf_softirq_thread(void *ptr); +void *ebpf_softirq_thread(void *ptr); #endif /* NETDATA_EBPF_SOFTIRQ_H */ diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 71d0c402b..8199573a9 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -34,9 +34,16 @@ static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = NULL, .internal_input = 0, .user_input = 0}}; -struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1, - NULL, NULL, NULL}; -static enum ebpf_threads_status ebpf_swap_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread swap_threads = { + .name = "SWAP KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -71,6 +78,7 @@ static void ebpf_swap_disable_trampoline(struct swap_bpf *obj) { bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false); bpf_program__set_autoload(obj->progs.netdata_swap_writepage_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); } /** @@ -87,6 +95,9 @@ static void ebpf_swap_set_trampoline_target(struct swap_bpf *obj) bpf_program__set_attach_target(obj->progs.netdata_swap_writepage_fentry, 0, swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name); + + bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0, + EBPF_COMMON_FNCT_CLEAN_UP); } /** @@ -114,6 
+125,13 @@ static int ebpf_swap_attach_kprobe(struct swap_bpf *obj) if (ret) return -1; + obj->links.netdata_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_probe, + false, + EBPF_COMMON_FNCT_CLEAN_UP); + ret = libbpf_get_error(obj->links.netdata_release_task_probe); + if (ret) + return -1; + return 0; } @@ -146,6 +164,19 @@ static void ebpf_swap_adjust_map_size(struct swap_bpf *obj, ebpf_module_t *em) } /** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_swap_disable_release_task(struct swap_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_release_task_probe, false); +} + +/** * Load and attach * * Load and attach the eBPF code in kernel. @@ -168,13 +199,16 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t ebpf_swap_disable_trampoline(obj); } + ebpf_swap_adjust_map_size(obj, em); + + if (!em->apps_charts && !em->cgroup_charts) + ebpf_swap_disable_release_task(obj); + int ret = swap_bpf__load(obj); if (ret) { return ret; } - ebpf_swap_adjust_map_size(obj, em); - ret = (test == EBPF_LOAD_TRAMPOLINE) ? swap_bpf__attach(obj) : ebpf_swap_attach_kprobe(obj); if (!ret) { ebpf_swap_set_hash_tables(obj); @@ -193,6 +227,38 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t *****************************************************************/ /** + * Swap Free + * + * Cleanup variables after child threads to stop + * + * @param em thread data.
+ */ +static void ebpf_swap_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + ebpf_cleanup_publish_syscall(swap_publish_aggregated); + + freez(swap_vector); + freez(swap_values); + freez(swap_threads.thread); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + swap_bpf__destroy(bpf_obj); +#endif + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + +/** * Swap exit * * Cancel thread and exit. @@ -202,12 +268,8 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t static void ebpf_swap_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_swap_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*swap_threads.thread); + ebpf_swap_free(em); } /** @@ -220,21 +282,7 @@ static void ebpf_swap_exit(void *ptr) static void ebpf_swap_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_swap_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_cleanup_publish_syscall(swap_publish_aggregated); - - freez(swap_vector); - freez(swap_values); - freez(swap_threads.thread); - -#ifdef LIBBPF_MAJOR_VERSION - if (bpf_obj) - swap_bpf__destroy(bpf_obj); -#endif - swap_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_swap_free(em); } /***************************************************************** @@ -401,17 +449,12 @@ void *ebpf_swap_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SWAP_SLEEP_MS * em->update_every; - while (ebpf_swap_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - 
if (ebpf_swap_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_swap_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -505,20 +548,15 @@ static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_o * Send Systemd charts * * Send collected data to Netdata. - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_swap_charts() +static void ebpf_send_systemd_swap_charts() { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -529,8 +567,6 @@ static int ebpf_send_systemd_swap_charts() } } write_end_chart(); - - return ret; } /** @@ -644,14 +680,11 @@ void ebpf_swap_send_cgroup_data(int update_every) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_swap_charts(update_every); - systemd_charts = 1; fflush(stdout); } - - systemd_charts = ebpf_send_systemd_swap_charts(); + ebpf_send_systemd_swap_charts(); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -812,7 +845,8 @@ static void ebpf_create_swap_charts(int update_every) static int ebpf_swap_load_bpf(ebpf_module_t *em) { int ret = 0; - if (em->load == EBPF_LOAD_LEGACY) { + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SWAP_READPAGE_CALL].mode); + if (em->load & EBPF_LOAD_LEGACY) { em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, 
running_on_kernel, isrh, &em->objects); if (!em->probe_links) { ret = -1; @@ -852,14 +886,11 @@ void *ebpf_swap_thread(void *ptr) ebpf_update_pid_table(&swap_maps[NETDATA_PID_SWAP_TABLE], em); - if (!em->enabled) - goto endswap; - #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_swap_load_bpf(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endswap; } @@ -877,8 +908,7 @@ void *ebpf_swap_thread(void *ptr) swap_collector(em); endswap: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h index 80c2c8e94..79182e52e 100644 --- a/collectors/ebpf.plugin/ebpf_swap.h +++ b/collectors/ebpf.plugin/ebpf_swap.h @@ -44,8 +44,8 @@ enum swap_counters { extern netdata_publish_swap_t **swap_pid; -extern void *ebpf_swap_thread(void *ptr); -extern void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_swap_thread(void *ptr); +void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr); extern struct config swap_config; extern netdata_ebpf_targets_t swap_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c index 0e56f541d..840497533 100644 --- a/collectors/ebpf.plugin/ebpf_sync.c +++ b/collectors/ebpf.plugin/ebpf_sync.c @@ -10,8 +10,16 @@ static netdata_publish_syscall_t sync_counter_publish_aggregated[NETDATA_SYNC_ID static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END]; -struct netdata_static_thread sync_threads = {"SYNC KERNEL", NULL, NULL, 1, - NULL, NULL, NULL}; +struct netdata_static_thread sync_threads = { + .name = "SYNC KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; static ebpf_local_maps_t sync_maps[] = {{.name = 
"tbl_sync", .internal_input = NETDATA_SYNC_END, .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, @@ -48,8 +56,6 @@ netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode {.name = NETDATA_SYSCALLS_FDATASYNC, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; -static enum ebpf_threads_status ebpf_sync_exited = NETDATA_THREAD_EBPF_RUNNING; - #ifdef LIBBPF_MAJOR_VERSION /***************************************************************** @@ -183,6 +189,7 @@ static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t * *****************************************************************/ +#ifdef LIBBPF_MAJOR_VERSION /** * Cleanup Objects * @@ -193,22 +200,37 @@ void ebpf_sync_cleanup_objects() int i; for (i = 0; local_syscalls[i].syscall; i++) { ebpf_sync_syscalls_t *w = &local_syscalls[i]; - if (w->probe_links) { - struct bpf_program *prog; - size_t j = 0 ; - bpf_object__for_each_program(prog, w->objects) { - bpf_link__destroy(w->probe_links[j]); - j++; - } - freez(w->probe_links); - if (w->objects) - bpf_object__close(w->objects); - } -#ifdef LIBBPF_MAJOR_VERSION - else if (w->sync_obj) + if (w->sync_obj) sync_bpf__destroy(w->sync_obj); + } +} #endif + +/** + * Sync Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. 
+ */ +static void ebpf_sync_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; } + pthread_mutex_unlock(&ebpf_exit_cleanup); + +#ifdef LIBBPF_MAJOR_VERSION + ebpf_sync_cleanup_objects(); +#endif + freez(sync_threads.thread); + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); } /** @@ -221,12 +243,8 @@ void ebpf_sync_cleanup_objects() static void ebpf_sync_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_sync_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*sync_threads.thread); + ebpf_sync_free(em); } /** @@ -237,14 +255,7 @@ static void ebpf_sync_exit(void *ptr) static void ebpf_sync_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_sync_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - ebpf_sync_cleanup_objects(); - freez(sync_threads.thread); - - sync_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_sync_free(em); } /***************************************************************** @@ -291,7 +302,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em) for (i = 0; local_syscalls[i].syscall; i++) { ebpf_sync_syscalls_t *w = &local_syscalls[i]; if (w->enabled) { - if (em->load == EBPF_LOAD_LEGACY) { + if (em->load & EBPF_LOAD_LEGACY) { if (ebpf_sync_load_legacy(w, em)) errors++; @@ -372,17 +383,12 @@ void *ebpf_sync_read_hash(void *ptr) heartbeat_init(&hb); usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_every; - while (ebpf_sync_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_sync_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while 
(!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_sync_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -572,14 +578,11 @@ void *ebpf_sync_thread(void *ptr) ebpf_sync_parse_syscalls(); - if (!em->enabled) - goto endsync; - #ifdef LIBBPF_MAJOR_VERSION ebpf_adjust_thread_load(em, default_btf); #endif if (ebpf_sync_initialize_syscall(em)) { - em->enabled = CONFIG_BOOLEAN_NO; + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endsync; } @@ -598,8 +601,7 @@ void *ebpf_sync_thread(void *ptr) sync_collector(em); endsync: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h index a52434c17..cace2a1cf 100644 --- a/collectors/ebpf.plugin/ebpf_sync.h +++ b/collectors/ebpf.plugin/ebpf_sync.h @@ -52,7 +52,7 @@ enum netdata_sync_table { NETDATA_SYNC_GLOBAL_TABLE }; -extern void *ebpf_sync_thread(void *ptr); +void *ebpf_sync_thread(void *ptr); extern struct config sync_config; extern netdata_ebpf_targets_t sync_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c index 6746cc624..ad6de4a07 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.c +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -34,16 +34,402 @@ struct config vfs_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -struct netdata_static_thread vfs_threads = {"VFS KERNEL", - NULL, NULL, 1, NULL, - NULL, NULL}; -static enum ebpf_threads_status ebpf_vfs_exited = NETDATA_THREAD_EBPF_RUNNING; +struct netdata_static_thread vfs_threads = { + .name = "VFS KERNEL", + .config_section = NULL, + .config_name = NULL, + .env_name = NULL, + .enabled = 1, + .thread = NULL, + .init_routine = NULL, + .start_routine = NULL +}; + +netdata_ebpf_targets_t 
vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_read", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_readv", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_unlink", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_fsync", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_open", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "vfs_create", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = "release_task", .mode = EBPF_LOAD_TRAMPOLINE}, + {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; + +#ifdef LIBBPF_MAJOR_VERSION +#include "includes/vfs.skel.h" // BTF code + +static struct vfs_bpf *bpf_obj = NULL; + +/** + * Disable probe + * + * Disable all probes to use exclusively another method. + * + * @param obj is the main structure for bpf objects + */ +static void ebpf_vfs_disable_probes(struct vfs_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_vfs_write_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_write_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_writev_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_writev_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_read_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_read_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_readv_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_readv_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_open_kprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_open_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_create_kprobe, 
false); + bpf_program__set_autoload(obj->progs.netdata_vfs_create_kretprobe, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false); +} + +/* + * Disable trampoline + * + * Disable all trampoline to use exclusively another method. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_vfs_disable_trampoline(struct vfs_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_vfs_write_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_write_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_writev_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_writev_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_read_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_read_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_readv_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_readv_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_open_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_open_fexit, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_create_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false); +} + +/** + * Set trampoline target + * + * Set the targets we will monitor. + * + * @param obj is the main structure for bpf objects. 
+ */ +static void ebpf_vfs_set_trampoline_target(struct vfs_bpf *obj) +{ + bpf_program__set_attach_target(obj->progs.netdata_vfs_write_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_WRITE].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_write_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_WRITE].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_writev_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_WRITEV].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_writev_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_WRITEV].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_read_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_READ].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_read_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_READ].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_readv_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_READV].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_readv_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_READV].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_unlink_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_UNLINK].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_fsync_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_fsync_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_open_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_open_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_create_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_CREATE].name); + + bpf_program__set_attach_target(obj->progs.netdata_vfs_release_task_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP); +} + +/** + * Attach Probe + * + * Attach probes to target + * + * @param obj is the main structure for bpf objects. 
+ * + * @return It returns 0 on success and -1 otherwise. + */ +static int ebpf_vfs_attach_probe(struct vfs_bpf *obj) +{ + obj->links.netdata_vfs_write_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_write_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_WRITE].name); + int ret = libbpf_get_error(obj->links.netdata_vfs_write_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_write_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_write_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_WRITE].name); + ret = libbpf_get_error(obj->links.netdata_vfs_write_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_writev_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_writev_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_WRITEV].name); + ret = libbpf_get_error(obj->links.netdata_vfs_writev_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_writev_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_writev_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_WRITEV].name); + ret = libbpf_get_error(obj->links.netdata_vfs_writev_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_read_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_read_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_READ].name); + ret = libbpf_get_error(obj->links.netdata_vfs_read_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_read_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_read_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_READ].name); + ret = libbpf_get_error(obj->links.netdata_vfs_read_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_readv_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_readv_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_READV].name); + ret = libbpf_get_error(obj->links.netdata_vfs_readv_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_readv_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_readv_kretprobe, true, + 
vfs_targets[NETDATA_EBPF_VFS_READV].name); + ret = libbpf_get_error(obj->links.netdata_vfs_readv_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_unlink_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_unlink_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_UNLINK].name); + ret = libbpf_get_error(obj->links.netdata_vfs_unlink_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_unlink_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_unlink_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_UNLINK].name); + ret = libbpf_get_error(obj->links.netdata_vfs_unlink_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_fsync_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_fsync_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + ret = libbpf_get_error(obj->links.netdata_vfs_open_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_open_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + ret = libbpf_get_error(obj->links.netdata_vfs_open_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_create_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_CREATE].name); + ret = libbpf_get_error(obj->links.netdata_vfs_create_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_create_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kretprobe, true, + 
vfs_targets[NETDATA_EBPF_VFS_CREATE].name); + ret = libbpf_get_error(obj->links.netdata_vfs_create_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_fsync_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_fsync_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_FSYNC].name); + ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + ret = libbpf_get_error(obj->links.netdata_vfs_open_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_open_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_OPEN].name); + ret = libbpf_get_error(obj->links.netdata_vfs_open_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_create_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kprobe, false, + vfs_targets[NETDATA_EBPF_VFS_CREATE].name); + ret = libbpf_get_error(obj->links.netdata_vfs_create_kprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_create_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kretprobe, true, + vfs_targets[NETDATA_EBPF_VFS_CREATE].name); + ret = libbpf_get_error(obj->links.netdata_vfs_create_kretprobe); + if (ret) + return -1; + + obj->links.netdata_vfs_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_release_task_fentry, + true, + EBPF_COMMON_FNCT_CLEAN_UP); + ret = libbpf_get_error(obj->links.netdata_vfs_release_task_kprobe); + if (ret) + return -1; + + return 0; +} + +/** + * Adjust Map Size + * + * Resize maps according input from users. 
+ * + * @param obj is the main structure for bpf objects. + * @param em structure with configuration + */ +static void ebpf_vfs_adjust_map_size(struct vfs_bpf *obj, ebpf_module_t *em) +{ + ebpf_update_map_size(obj->maps.tbl_vfs_pid, &vfs_maps[NETDATA_VFS_PID], + em, bpf_map__name(obj->maps.tbl_vfs_pid)); +} + +/** + * Set hash tables + * + * Set the values for maps according the value given by kernel. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_vfs_set_hash_tables(struct vfs_bpf *obj) +{ + vfs_maps[NETDATA_VFS_ALL].map_fd = bpf_map__fd(obj->maps.tbl_vfs_stats); + vfs_maps[NETDATA_VFS_PID].map_fd = bpf_map__fd(obj->maps.tbl_vfs_pid); + vfs_maps[NETDATA_VFS_CTRL].map_fd = bpf_map__fd(obj->maps.vfs_ctrl); +} + +/** + * Disable Release Task + * + * Disable release task when apps is not enabled. + * + * @param obj is the main structure for bpf objects. + */ +static void ebpf_vfs_disable_release_task(struct vfs_bpf *obj) +{ + bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false); + bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false); +} + +/** + * Load and attach + * + * Load and attach the eBPF code in kernel. + * + * @param obj is the main structure for bpf objects. + * @param em structure with configuration + * + * @return it returns 0 on succes and -1 otherwise + */ +static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *em) +{ + netdata_ebpf_targets_t *mt = em->targets; + netdata_ebpf_program_loaded_t test = mt[NETDATA_EBPF_VFS_WRITE].mode; + + if (test == EBPF_LOAD_TRAMPOLINE) { + ebpf_vfs_disable_probes(obj); + + ebpf_vfs_set_trampoline_target(obj); + } else { + ebpf_vfs_disable_trampoline(obj); + } + + ebpf_vfs_adjust_map_size(obj, em); + + if (!em->apps_charts && !em->cgroup_charts) + ebpf_vfs_disable_release_task(obj); + + int ret = vfs_bpf__load(obj); + if (ret) { + return ret; + } + + ret = (test == EBPF_LOAD_TRAMPOLINE) ? 
vfs_bpf__attach(obj) : ebpf_vfs_attach_probe(obj); + if (!ret) { + ebpf_vfs_set_hash_tables(obj); + + ebpf_update_controller(vfs_maps[NETDATA_VFS_CTRL].map_fd, em); + } + + return ret; +} +#endif /***************************************************************** * * FUNCTIONS TO CLOSE THE THREAD * *****************************************************************/ + +/** + * Cachestat Free + * + * Cleanup variables after child threads to stop + * + * @param ptr thread data. + */ +static void ebpf_vfs_free(ebpf_module_t *em) +{ + pthread_mutex_lock(&ebpf_exit_cleanup); + if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING; + pthread_mutex_unlock(&ebpf_exit_cleanup); + return; + } + pthread_mutex_unlock(&ebpf_exit_cleanup); + + freez(vfs_hash_values); + freez(vfs_vector); + freez(vfs_threads.thread); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + vfs_bpf__destroy(bpf_obj); +#endif + + pthread_mutex_lock(&ebpf_exit_cleanup); + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; + pthread_mutex_unlock(&ebpf_exit_cleanup); +} + /** * Exit * @@ -54,12 +440,8 @@ static enum ebpf_threads_status ebpf_vfs_exited = NETDATA_THREAD_EBPF_RUNNING; static void ebpf_vfs_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - em->enabled = NETDATA_MAIN_THREAD_EXITED; - return; - } - - ebpf_vfs_exited = NETDATA_THREAD_EBPF_STOPPING; + netdata_thread_cancel(*vfs_threads.thread); + ebpf_vfs_free(em); } /** @@ -70,15 +452,7 @@ static void ebpf_vfs_exit(void *ptr) static void ebpf_vfs_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (ebpf_vfs_exited != NETDATA_THREAD_EBPF_STOPPED) - return; - - freez(vfs_hash_values); - freez(vfs_vector); - freez(vfs_threads.thread); - - vfs_threads.enabled = NETDATA_MAIN_THREAD_EXITED; - em->enabled = NETDATA_MAIN_THREAD_EXITED; + ebpf_vfs_free(em); } /***************************************************************** @@ -519,17 +893,12 @@ void 
*ebpf_vfs_read_hash(void *ptr) usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_every; //This will be cancelled by its parent - while (ebpf_vfs_exited == NETDATA_THREAD_EBPF_RUNNING) { - usec_t dt = heartbeat_next(&hb, step); - (void)dt; - if (ebpf_vfs_exited == NETDATA_THREAD_EBPF_STOPPING) - break; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); read_global_table(); } - ebpf_vfs_exited = NETDATA_THREAD_EBPF_STOPPED; - netdata_thread_cleanup_pop(1); return NULL; } @@ -976,20 +1345,15 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) * Send collected data to Netdata. * * @param em the main collector structure - * - * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned - * otherwise function returns 1 to avoid chart recreation */ -static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em) +static void ebpf_send_systemd_vfs_charts(ebpf_module_t *em) { - int ret = 1; ebpf_cgroup_target_t *ect; write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED); for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call); - } else if (unlikely(ect->systemd)) - ret = 0; + } } write_end_chart(); @@ -1105,8 +1469,6 @@ static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em) } write_end_chart(); } - - return ret; } /** @@ -1127,13 +1489,10 @@ static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em) int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; if (has_systemd) { - static int systemd_charts = 0; - if (!systemd_charts) { + if (send_cgroup_chart) { ebpf_create_systemd_vfs_charts(em); - systemd_charts = 1; } - - systemd_charts = ebpf_send_systemd_vfs_charts(em); + ebpf_send_systemd_vfs_charts(em); } for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { @@ -1543,6 +1902,36 @@ static void ebpf_vfs_allocate_global_vectors(int apps) * 
*****************************************************************/ +/* + * Load BPF + * + * Load BPF files. + * + * @param em the structure with configuration + */ +static int ebpf_vfs_load_bpf(ebpf_module_t *em) +{ + int ret = 0; + ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_EBPF_VFS_WRITE].mode); + if (em->load & EBPF_LOAD_LEGACY) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { + ret = -1; + } + } +#ifdef LIBBPF_MAJOR_VERSION + else { + bpf_obj = vfs_bpf__open(); + if (!bpf_obj) + ret = -1; + else + ret = ebpf_vfs_load_and_attach(bpf_obj, em); + } +#endif + + return ret; +} + /** * Process thread * @@ -1563,12 +1952,11 @@ void *ebpf_vfs_thread(void *ptr) ebpf_vfs_allocate_global_vectors(em->apps_charts); - if (!em->enabled) - goto endvfs; - - em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); - if (!em->probe_links) { - em->enabled = CONFIG_BOOLEAN_NO; +#ifdef LIBBPF_MAJOR_VERSION + ebpf_adjust_thread_load(em, default_btf); +#endif + if (ebpf_vfs_load_bpf(em)) { + em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED; goto endvfs; } @@ -1588,8 +1976,7 @@ void *ebpf_vfs_thread(void *ptr) vfs_collector(em); endvfs: - if (!em->enabled) - ebpf_update_disabled_plugin_stats(em); + ebpf_update_disabled_plugin_stats(em); netdata_thread_cleanup_pop(1); return NULL; diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h index 87a21e39c..2e3c7cc29 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.h +++ b/collectors/ebpf.plugin/ebpf_vfs.h @@ -149,13 +149,28 @@ enum vfs_counters { enum netdata_vfs_tables { NETDATA_VFS_PID, - NETDATA_VFS_ALL + NETDATA_VFS_ALL, + NETDATA_VFS_CTRL +}; + +enum netdata_vfs_calls_name { + NETDATA_EBPF_VFS_WRITE, + NETDATA_EBPF_VFS_WRITEV, + NETDATA_EBPF_VFS_READ, + NETDATA_EBPF_VFS_READV, + NETDATA_EBPF_VFS_UNLINK, + NETDATA_EBPF_VFS_FSYNC, + NETDATA_EBPF_VFS_OPEN, + NETDATA_EBPF_VFS_CREATE, + + 
NETDATA_VFS_END_LIST }; extern netdata_publish_vfs_t **vfs_pid; -extern void *ebpf_vfs_thread(void *ptr); -extern void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr); +void *ebpf_vfs_thread(void *ptr); +void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr); +extern netdata_ebpf_targets_t vfs_targets[]; extern struct config vfs_config; |