Diffstat (limited to 'collectors/ebpf.plugin')
33 files changed, 1419 insertions, 1523 deletions
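The heart of this patch is a new shutdown path for `ebpf.plugin`: the old `ebpf_exit()` signal handler, which freed every module's structures in place, is replaced by `ebpf_stop_threads()`, which sets a global `ebpf_exit_plugin` flag, waits up to two seconds for each worker thread to mark itself `NETDATA_MAIN_THREAD_EXITED`, unloads the legacy eBPF objects and links, and only then exits. The stand-alone sketch below illustrates that coordination pattern; the Netdata names are taken from the patch, but the worker body, the `THREADS` count, and the bookkeeping arrays are hypothetical simplifications, not Netdata code.

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

#define THREADS 2                                     /* hypothetical: the patch tracks 16 eBPF threads */

static volatile sig_atomic_t exit_plugin;             /* plays the role of ebpf_exit_plugin */
static volatile sig_atomic_t thread_exited[THREADS];  /* plays the role of NETDATA_MAIN_THREAD_EXITED */

static void *worker(void *arg)
{
    int id = *(int *)arg;
    while (!exit_plugin)                              /* collectors poll the flag on each heartbeat */
        usleep(100000);                               /* stand-in for heartbeat_next() + eBPF map reads */
    thread_exited[id] = 1;                            /* tell the handler this thread is done */
    return NULL;
}

static void stop_threads(int sig)
{
    (void)sig;
    exit_plugin = 1;

    /* Wait at most 2 seconds in 100ms steps, as ebpf_stop_threads() does. */
    int remaining = THREADS;
    for (long waited = 0; remaining && waited < 2000000; waited += 100000) {
        usleep(100000);
        remaining = 0;
        for (int j = 0; j < THREADS; j++)
            if (!thread_exited[j])
                remaining++;
    }

    /* The real handler unloads bpf objects/links (ebpf_unload_legacy_code) here. */
    _exit(0);
}

int main(void)
{
    signal(SIGINT, stop_threads);
    signal(SIGTERM, stop_threads);

    pthread_t tid[THREADS];
    int ids[THREADS] = {0, 1};
    for (int i = 0; i < THREADS; i++)
        pthread_create(&tid[i], NULL, worker, &ids[i]);

    /* Like the new main(): start the workers, then sleep until a signal arrives. */
    for (;;)
        pause();
}

As in the patch itself, sleeping inside a signal handler is not strictly async-signal-safe; the plugin accepts this because it is about to exit anyway.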
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index dc406b7f..550982ad 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -8,7 +8,7 @@ sidebar_label: "eBPF"
 
 # eBPF monitoring with Netdata
 
-The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampoline, and2 kprobes](#data-collection) to collect a wide array of high value data about the host that would otherwise be impossible to capture.
+The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampoline, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture.
 
 > ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
 
@@ -20,8 +20,6 @@ For hands-on configuration and troubleshooting tips see our [tutorial on trouble
 <figcaption>An example of virtual file system (VFS) charts made possible by the eBPF collector plugin.</figcaption>
 </figure>
 
-<a id="data-collection"> </a>
-
 ## How Netdata collects data using probes and tracepoints
 
 Netdata uses the following features from the Linux kernel to run eBPF programs:
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 2b25f50a..65c96f67 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -18,8 +18,6 @@ char *ebpf_plugin_dir = PLUGINS_DIR;
 static char *ebpf_configured_log_dir = LOG_DIR;
 
 char *ebpf_algorithms[] = {"absolute", "incremental"};
-static int thread_finished = 0;
-int close_ebpf_plugin = 0;
 struct config collector_config = { .first_section = NULL,
                                    .last_section = NULL,
                                    .mutex = NETDATA_MUTEX_INITIALIZER,
@@ -29,7 +27,6 @@ struct config collector_config = { .first_section = NULL,
 int running_on_kernel = 0;
 int ebpf_nprocs;
 int isrh = 0;
-uint32_t finalized_threads = 1;
 
 pthread_mutex_t lock;
 pthread_mutex_t collect_data_mutex;
@@ -37,132 +34,264 @@ pthread_cond_t collect_data_cond_var;
 
 ebpf_module_t ebpf_modules[] = {
     { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
-      .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+      .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
       .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
       .apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
       .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
      .config_file = NETDATA_PROCESS_CONFIG_FILE,
      .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10,
-      .load = EBPF_LOAD_LEGACY, .targets = NULL},
+      .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL},
     { .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
-      .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+      .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts =
NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config, .config_file = NETDATA_NETWORK_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = socket_targets}, + .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config, .config_file = NETDATA_CACHESTAT_CONFIG_FILE, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18| - NETDATA_V5_4 | NETDATA_V5_15 | NETDATA_V5_16, - .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18| NETDATA_V5_4 | NETDATA_V5_15 | + NETDATA_V5_16, + .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, .config_file = NETDATA_SYNC_CONFIG_FILE, // All syscalls have the same kernels .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = sync_targets}, + .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config, .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = dc_targets}, + .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL, 
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config, .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = swap_targets}, + .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config, .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, .config_file = NETDATA_FILESYSTEM_CONFIG_FILE, //We are setting kernels as zero, because we load eBPF programs according the kernel running. 
- .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL }, + .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL }, { .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, .config_file = NETDATA_DISK_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, .config_file = NETDATA_MOUNT_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = mount_targets}, + .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config, .config_file = NETDATA_FD_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, .config_file = NETDATA_HARDIRQ_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = 
CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, .config_file = NETDATA_SOFTIRQ_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config, .config_file = NETDATA_OOMKILL_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config, .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = shm_targets}, + .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL}, { .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread, - .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE, .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4, - .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL}, { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY, - .global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, + .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL, - .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL}, + .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, + .objects = NULL}, +}; + +struct netdata_static_thread ebpf_threads[] = { + {"EBPF PROCESS", NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF SOCKET" , NULL, 
NULL, 1, + NULL, NULL, NULL}, + {"EBPF CACHESTAT" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF SYNC" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF DCSTAT" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF SWAP" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF VFS" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF FILESYSTEM" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF DISK" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF MOUNT" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF FD" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF HARDIRQ" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF SOFTIRQ" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF OOMKILL" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF SHM" , NULL, NULL, 1, + NULL, NULL, NULL}, + {"EBPF MDFLUSH" , NULL, NULL, 1, + NULL, NULL, NULL}, + {NULL , NULL, NULL, 0, + NULL, NULL, NULL} +}; + +ebpf_filesystem_partitions_t localfs[] = + {{.filesystem = "ext4", + .optional_filesystem = NULL, + .family = "ext4", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, + {.filesystem = "xfs", + .optional_filesystem = NULL, + .family = "xfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, + {.filesystem = "nfs", + .optional_filesystem = "nfs4", + .family = "nfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_ATTR_CHARTS, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, + {.filesystem = "zfs", + .optional_filesystem = NULL, + .family = "zfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, + {.filesystem = "btrfs", + .optional_filesystem = NULL, + .family = "btrfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = "btrfs_file_operations", .addr = 0}, + .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10}, + {.filesystem = NULL, + .optional_filesystem = NULL, + .family = NULL, + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}, + .kernels = 0}}; + +ebpf_sync_syscalls_t local_syscalls[] = { + {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, +#ifdef LIBBPF_MAJOR_VERSION + .sync_obj = NULL +#endif + }, + {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, +#ifdef LIBBPF_MAJOR_VERSION + .sync_obj = NULL +#endif + }, + {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, +#ifdef LIBBPF_MAJOR_VERSION + .sync_obj = NULL +#endif + }, + {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, +#ifdef LIBBPF_MAJOR_VERSION + 
.sync_obj = NULL
+#endif
+    },
+    {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+    .sync_obj = NULL
+#endif
+    },
+    {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+    .sync_obj = NULL
+#endif
+    },
+    {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL,
+#ifdef LIBBPF_MAJOR_VERSION
+    .sync_obj = NULL
+#endif
+    }
+};
 
 // Link with apps.plugin
@@ -193,125 +322,95 @@ char *btf_path = NULL;
 *****************************************************************/
 
 /**
- * Clean Loaded Events
- *
- * This function cleans the events previous loaded on Linux.
-void clean_loaded_events()
-{
-    int event_pid;
-    for (event_pid = 0; ebpf_modules[event_pid].probes; event_pid++)
-        clean_kprobe_events(NULL, (int)ebpf_modules[event_pid].thread_id, ebpf_modules[event_pid].probes);
-}
- */
-
-/**
  * Close the collector gracefully
  *
  * @param sig is the signal number used to close the collector
  */
 static void ebpf_exit(int sig)
 {
-    close_ebpf_plugin = 1;
-    static int remove_pid = 0;
-
-    // When both threads were not finished case I try to go in front this address, the collector will crash
-    if (!thread_finished) {
-        return;
-    }
-
-    if (ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0;
-        clean_socket_apps_structures();
-        freez(socket_bandwidth_curr);
+#ifdef LIBBPF_MAJOR_VERSION
+    if (default_btf) {
+        btf__free(default_btf);
+        default_btf = NULL;
     }
+#endif
 
-    if (ebpf_modules[EBPF_MODULE_CACHESTAT_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_CACHESTAT_IDX].enabled = 0;
-        clean_cachestat_pid_structures();
-        freez(cachestat_pid);
-    }
+    char filename[FILENAME_MAX + 1];
+    ebpf_pid_file(filename, FILENAME_MAX);
+    if (unlink(filename))
+        error("Cannot remove PID file %s", filename);
 
-    if (ebpf_modules[EBPF_MODULE_DCSTAT_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_DCSTAT_IDX].enabled = 0;
-        clean_dcstat_pid_structures();
-        freez(dcstat_pid);
-    }
+    exit(sig);
+}
 
-    if (ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled = 0;
-        clean_swap_pid_structures();
-        freez(swap_pid);
-    }
+/**
+ * Unload legacy code
+ *
+ * @param objects objects loaded from eBPF programs
+ * @param probe_links links from loader
+ */
+static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links)
+{
+    if (!probe_links || !objects)
+        return;
 
-    if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0;
-        clean_vfs_pid_structures();
-        freez(vfs_pid);
+    struct bpf_program *prog;
+    size_t j = 0;
+    bpf_object__for_each_program(prog, objects) {
+        bpf_link__destroy(probe_links[j]);
+        j++;
     }
+    freez(probe_links);
+    if (objects)
+        bpf_object__close(objects);
+}
 
-    if (ebpf_modules[EBPF_MODULE_FD_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_FD_IDX].enabled = 0;
-        clean_fd_pid_structures();
-        freez(fd_pid);
+int ebpf_exit_plugin = 0;
+/**
+ * Close the collector gracefully
+ *
+ * @param sig is the signal number used to close the collector
+ */
+static void ebpf_stop_threads(int sig)
+{
+    ebpf_exit_plugin = 1;
+    int i;
+    for (i = 0; ebpf_threads[i].name != NULL; i++);
+
+    usec_t max = 2 * USEC_PER_SEC, step = 100000;
+    while (i && max) {
+        max -= step;
+        sleep_usec(step);
+        i = 0;
+        int j;
+        for (j = 0; ebpf_threads[j].name != NULL; j++) {
+            if (ebpf_threads[j].enabled != NETDATA_MAIN_THREAD_EXITED)
+                i++;
+        }
     }
 
-    if (ebpf_modules[EBPF_MODULE_SHM_IDX].enabled) {
-        ebpf_modules[EBPF_MODULE_SHM_IDX].enabled = 0;
-        clean_shm_pid_structures();
-        freez(shm_pid);
+    // Unload threads (except sync and filesystem)
+    for (i = 0; ebpf_threads[i].name != NULL; i++) {
+        if (ebpf_threads[i].enabled == NETDATA_MAIN_THREAD_EXITED && i != EBPF_MODULE_FILESYSTEM_IDX &&
+            i != EBPF_MODULE_SYNC_IDX)
+            ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
     }
 
-    ebpf_close_cgroup_shm();
-
-    ebpf_clean_cgroup_pids();
-    /*
-    int ret = fork();
-    if (ret < 0) // error
-        error("Cannot fork(), so I won't be able to clean %skprobe_events", NETDATA_DEBUGFS);
-    else if (!ret) { // child
-        int i;
-        for (i = getdtablesize(); i >= 0; --i)
-            close(i);
-
-        int fd = open("/dev/null", O_RDWR, 0);
-        if (fd != -1) {
-            dup2(fd, STDIN_FILENO);
-            dup2(fd, STDOUT_FILENO);
-            dup2(fd, STDERR_FILENO);
-        }
-
-        if (fd > 2)
-            close(fd);
-
-        int sid = setsid();
-        if (sid >= 0) {
-            debug(D_EXIT, "Wait for father %d die", getpid());
-            sleep_usec(200000); // Sleep 200 milliseconds to father dies.
-            clean_loaded_events();
-        } else {
-            error("Cannot become session id leader, so I won't try to clean kprobe_events.\n");
+    // Unload filesystem
+    if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_MAIN_THREAD_EXITED) {
+        for (i = 0; localfs[i].filesystem != NULL; i++) {
+            ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
         }
-    } else { // parent
-        exit(0);
-    }
-    */
-
-#ifdef LIBBPF_MAJOR_VERSION
-    if (default_btf) {
-        btf__free(default_btf);
-        default_btf = NULL;
     }
-#endif
 
-    if (!remove_pid) {
-        remove_pid = 1;
-        char filename[FILENAME_MAX + 1];
-        ebpf_pid_file(filename, FILENAME_MAX);
-        if (unlink(filename))
-            error("Cannot remove PID file %s", filename);
+    // Unload sync
+    if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_MAIN_THREAD_EXITED) {
+        for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+            ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+        }
     }
 
-    exit(sig);
+    ebpf_exit(sig);
 }
 
 /*****************************************************************
@@ -702,7 +801,7 @@ static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disabl
 // oomkill stores data inside apps submenu, so it always need to have apps_enabled for plugin to create
 // its chart, without this comparison eBPF.plugin will try to store invalid data when apps is disabled.
if (!disable_apps || !strcmp(em->thread_name, "oomkill")) { - em->apps_charts = CONFIG_BOOLEAN_YES; + em->apps_charts = NETDATA_EBPF_APPS_FLAG_YES; } if (!disable_cgroup) { @@ -767,7 +866,7 @@ static inline void ebpf_disable_apps() { int i; for (i = 0; ebpf_modules[i].thread_name; i++) { - ebpf_modules[i].apps_charts = 0; + ebpf_modules[i].apps_charts = NETDATA_EBPF_APPS_FLAG_NO; } } @@ -1074,7 +1173,6 @@ int ebpf_start_pthread_variables() pthread_mutex_init(&collect_data_mutex, NULL); if (pthread_cond_init(&collect_data_cond_var, NULL)) { - thread_finished++; error("Cannot start conditional variable to control Apps charts."); return -1; } @@ -1094,7 +1192,7 @@ static inline uint32_t ebpf_am_i_collect_pids() uint32_t ret = 0; int i; for (i = 0; ebpf_modules[i].thread_name; i++) { - ret |= ebpf_modules[i].cgroup_charts | ebpf_modules[i].apps_charts; + ret |= ebpf_modules[i].cgroup_charts | (ebpf_modules[i].apps_charts & NETDATA_EBPF_APPS_FLAG_YES); } return ret; @@ -1718,7 +1816,6 @@ static void ebpf_parse_args(int argc, char **argv) &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) { error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", ebpf_stock_config_dir); - thread_finished++; ebpf_exit(1); } } else @@ -1875,6 +1972,19 @@ static void ebpf_manage_pid(pid_t pid) } /** + * Set start routine + * + * Set static routine before threads to be created. + */ + static void ebpf_set_static_routine() + { + int i; + for (i = 0; ebpf_modules[i].thread_name; i++) { + ebpf_threads[i].start_routine = ebpf_modules[i].start_routine; + } + } + +/** * Entry point * * @param argc the number of arguments @@ -1918,16 +2028,19 @@ int main(int argc, char **argv) return 4; } - signal(SIGINT, ebpf_exit); - signal(SIGTERM, ebpf_exit); - signal(SIGPIPE, ebpf_exit); + signal(SIGINT, ebpf_stop_threads); + signal(SIGQUIT, ebpf_stop_threads); + signal(SIGTERM, ebpf_stop_threads); + signal(SIGPIPE, ebpf_stop_threads); if (ebpf_start_pthread_variables()) { - thread_finished++; error("Cannot start mutex to control overall charts."); ebpf_exit(5); } + netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX"); + if(verify_netdata_host_prefix() == -1) ebpf_exit(6); + ebpf_allocate_common_vectors(); #ifdef LIBBPF_MAJOR_VERSION @@ -1940,44 +2053,7 @@ int main(int argc, char **argv) read_local_ports("/proc/net/udp", IPPROTO_UDP); read_local_ports("/proc/net/udp6", IPPROTO_UDP); - struct netdata_static_thread ebpf_threads[] = { - {"EBPF PROCESS", NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_PROCESS_IDX].start_routine}, - {"EBPF SOCKET" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_SOCKET_IDX].start_routine}, - {"EBPF CACHESTAT" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_CACHESTAT_IDX].start_routine}, - {"EBPF SYNC" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_SYNC_IDX].start_routine}, - {"EBPF DCSTAT" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine}, - {"EBPF SWAP" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine}, - {"EBPF VFS" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine}, - {"EBPF FILESYSTEM" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].start_routine}, - {"EBPF DISK" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_DISK_IDX].start_routine}, - {"EBPF MOUNT" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_MOUNT_IDX].start_routine}, - {"EBPF FD" , NULL, NULL, 
1, - NULL, NULL, ebpf_modules[EBPF_MODULE_FD_IDX].start_routine}, - {"EBPF HARDIRQ" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_HARDIRQ_IDX].start_routine}, - {"EBPF SOFTIRQ" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_SOFTIRQ_IDX].start_routine}, - {"EBPF OOMKILL" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_OOMKILL_IDX].start_routine}, - {"EBPF SHM" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_SHM_IDX].start_routine}, - {"EBPF MDFLUSH" , NULL, NULL, 1, - NULL, NULL, ebpf_modules[EBPF_MODULE_MDFLUSH_IDX].start_routine}, - {NULL , NULL, NULL, 0, - NULL, NULL, NULL} - }; - - //clean_loaded_events(); + ebpf_set_static_routine(); int i; for (i = 0; ebpf_threads[i].name != NULL; i++) { @@ -1986,16 +2062,16 @@ int main(int argc, char **argv) ebpf_module_t *em = &ebpf_modules[i]; em->thread_id = i; - netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_JOINABLE, st->start_routine, em); + netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em); } - for (i = 0; ebpf_threads[i].name != NULL; i++) { - struct netdata_static_thread *st = &ebpf_threads[i]; - netdata_thread_join(*st->thread, NULL); + usec_t step = 60 * USEC_PER_SEC; + heartbeat_t hb; + heartbeat_init(&hb); + //Plugin will be killed when it receives a signal + for (;;) { + (void)heartbeat_next(&hb, step); } - thread_finished++; - ebpf_exit(0); - return 0; } diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h index 337e4f47..c23ca332 100644 --- a/collectors/ebpf.plugin/ebpf.h +++ b/collectors/ebpf.plugin/ebpf.h @@ -108,6 +108,12 @@ typedef struct ebpf_tracepoint { char *event; } ebpf_tracepoint_t; +enum ebpf_threads_status { + NETDATA_THREAD_EBPF_RUNNING, + NETDATA_THREAD_EBPF_STOPPING, + NETDATA_THREAD_EBPF_STOPPED +}; + // Copied from musl header #ifndef offsetof #if __GNUC__ > 3 @@ -162,7 +168,6 @@ extern void *ebpf_socket_thread(void *ptr); // Common variables extern pthread_mutex_t lock; -extern int close_ebpf_plugin; extern int ebpf_nprocs; extern int running_on_kernel; extern int isrh; @@ -265,7 +270,6 @@ extern int shm_fd_ebpf_cgroup; extern sem_t *shm_sem_ebpf_cgroup; extern pthread_mutex_t mutex_cgroup_shm; extern size_t all_pids_count; -extern uint32_t finalized_threads; extern ebpf_plugin_stats_t plugin_statistics; extern struct btf *default_btf; @@ -281,6 +285,9 @@ extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *u char *charttype, char *context, int order, int update_every); extern void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end); void ebpf_update_disabled_plugin_stats(ebpf_module_t *em); +extern ebpf_filesystem_partitions_t localfs[]; +extern ebpf_sync_syscalls_t local_syscalls[]; +extern int ebpf_exit_plugin; #define EBPF_MAX_SYNCHRONIZATION_TIME 300 diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c index 2c65db8d..7519e064 100644 --- a/collectors/ebpf.plugin/ebpf_apps.c +++ b/collectors/ebpf.plugin/ebpf_apps.c @@ -134,7 +134,7 @@ size_t zero_all_targets(struct target *root) while (pid_on_target) { struct pid_on_target *pid_on_target_to_free = pid_on_target; pid_on_target = pid_on_target->next; - free(pid_on_target_to_free); + freez(pid_on_target_to_free); } w->root_pid = NULL; @@ -1119,7 +1119,7 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd) key = pids->pid; ebpf_process_stat_t *w = global_process_stats[key]; if (!w) { - w = 
mallocz(sizeof(ebpf_process_stat_t)); + w = callocz(1, sizeof(ebpf_process_stat_t)); global_process_stats[key] = w; } diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h index 259e642a..f65a137b 100644 --- a/collectors/ebpf.plugin/ebpf_apps.h +++ b/collectors/ebpf.plugin/ebpf_apps.h @@ -433,8 +433,6 @@ extern size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep extern void collect_data_for_all_processes(int tbl_pid_stats_fd); -extern void clean_global_memory(); - extern ebpf_process_stat_t **global_process_stats; extern ebpf_process_publish_apps_t **current_apps_data; extern netdata_publish_cachestat_t **cachestat_pid; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index b565f635..14669bf6 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -5,9 +5,6 @@ netdata_publish_cachestat_t **cachestat_pid; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit", "miss" }; static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END]; @@ -18,8 +15,6 @@ netdata_cachestat_pid_t *cachestat_vector = NULL; static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END]; static netdata_idx_t *cachestat_values = NULL; -static int read_thread_closed = 1; - struct netdata_static_thread cachestat_threads = {"CACHESTAT KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; @@ -44,6 +39,7 @@ struct config cachestat_config = { .first_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; +static enum ebpf_threads_status ebpf_cachestat_exited = NETDATA_THREAD_EBPF_RUNNING; netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -294,56 +290,48 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf *****************************************************************/ /** - * Clean PID structures + * Cachestat exit. * - * Clean the allocated structures. + * Cancel child and exit. + * + * @param ptr thread data. */ -void clean_cachestat_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(cachestat_pid[pids->pid]); - - pids = pids->next; +static void ebpf_cachestat_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; } + + ebpf_cachestat_exited = NETDATA_THREAD_EBPF_STOPPING; } /** - * Clean up the main thread. + * Cachestat cleanup + * + * Clean up allocated addresses. * * @param ptr thread data. 
*/ static void ebpf_cachestat_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_cachestat_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2*USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated); freez(cachestat_vector); freez(cachestat_values); + freez(cachestat_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } #ifdef LIBBPF_MAJOR_VERSION - else if (bpf_obj) + if (bpf_obj) cachestat_bpf__destroy(bpf_obj); #endif + cachestat_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -367,23 +355,23 @@ void cachestat_update_publish(netdata_publish_cachestat_t *out, uint64_t mpa, ui uint64_t apcl, uint64_t apd) { // Adapted algorithm from https://github.com/iovisor/bcc/blob/master/tools/cachestat.py#L126-L138 - calculated_number total = (calculated_number) (((long long)mpa) - ((long long)mbd)); + NETDATA_DOUBLE total = (NETDATA_DOUBLE) (((long long)mpa) - ((long long)mbd)); if (total < 0) total = 0; - calculated_number misses = (calculated_number) ( ((long long) apcl) - ((long long) apd) ); + NETDATA_DOUBLE misses = (NETDATA_DOUBLE) ( ((long long) apcl) - ((long long) apd) ); if (misses < 0) misses = 0; // If hits are < 0, then its possible misses are overestimate due to possibly page cache read ahead adding // more pages than needed. In this case just assume misses as total and reset hits. - calculated_number hits = total - misses; + NETDATA_DOUBLE hits = total - misses; if (hits < 0 ) { misses = total; hits = 0; } - calculated_number ratio = (total > 0) ? hits/total : 1; + NETDATA_DOUBLE ratio = (total > 0) ? 
hits/total : 1; out->ratio = (long long )(ratio*100); out->hit = (long long)hits; @@ -607,6 +595,8 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr) 20093, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /***************************************************************** @@ -652,22 +642,25 @@ static void read_global_table() */ void *ebpf_cachestat_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_cachestat_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_cachestat_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_cachestat_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_cachestat_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -892,7 +885,7 @@ static int ebpf_send_systemd_cachestat_charts() for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -1081,43 +1074,44 @@ void ebpf_cachestat_send_cgroup_data(int update_every) */ static void cachestat_collector(ebpf_module_t *em) { - cachestat_threads.thread = mallocz(sizeof(netdata_thread_t)); + cachestat_threads.thread = callocz(1, sizeof(netdata_thread_t)); cachestat_threads.start_routine = ebpf_cachestat_read_hash; - netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_cachestat_read_hash, em); netdata_publish_cachestat_t publish; memset(&publish, 0, sizeof(publish)); - int apps = em->apps_charts; int cgroups = em->cgroup_charts; int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + //This will be cancelled by its parent + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + + netdata_apps_integration_flags_t apps = em->apps_charts; pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - if (apps) - read_apps_table(); + if (apps) + read_apps_table(); - if (cgroups) - ebpf_update_cachestat_cgroup(); + if (cgroups) + ebpf_update_cachestat_cgroup(); - pthread_mutex_lock(&lock); + pthread_mutex_lock(&lock); - cachestat_send_global(&publish); + cachestat_send_global(&publish); - if (apps) - ebpf_cache_send_apps_data(apps_groups_root_target); + if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_cache_send_apps_data(apps_groups_root_target); - if (cgroups) - ebpf_cachestat_send_cgroup_data(update_every); - - pthread_mutex_unlock(&lock); - } + if (cgroups) + ebpf_cachestat_send_cgroup_data(update_every); + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -1234,8 +1228,8 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em) { int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = 
ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { ret = -1; } } @@ -1266,7 +1260,7 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em) */ void *ebpf_cachestat_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_cachestat_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_cachestat_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = cachestat_maps; diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h index b386e383..fdd88464 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.h +++ b/collectors/ebpf.plugin/ebpf_cachestat.h @@ -82,7 +82,6 @@ typedef struct netdata_publish_cachestat { } netdata_publish_cachestat_t; extern void *ebpf_cachestat_thread(void *ptr); -extern void clean_cachestat_pid_structures(); extern struct config cachestat_config; extern netdata_ebpf_targets_t cachestat_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c index e6b483ba..24469c64 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.c +++ b/collectors/ebpf.plugin/ebpf_cgroup.c @@ -134,26 +134,6 @@ static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt) } /** - * Cleanup link list - */ -void ebpf_clean_cgroup_pids() -{ - if (!ebpf_cgroup_pids) - return; - - ebpf_cgroup_target_t *ect = ebpf_cgroup_pids; - while (ect) { - ebpf_cgroup_target_t *next_cgroup = ect->next; - - ebpf_clean_specific_cgroup_pids(ect->pids); - freez(ect); - - ect = next_cgroup; - } - ebpf_cgroup_pids = NULL; -} - -/** * Remove Cgroup Update Target Update List * * Remove from cgroup target and update the link list diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h index 03969194..cca9a950 100644 --- a/collectors/ebpf.plugin/ebpf_cgroup.h +++ b/collectors/ebpf.plugin/ebpf_cgroup.h @@ -63,7 +63,6 @@ typedef struct ebpf_cgroup_target { extern void ebpf_map_cgroup_shared_memory(); extern void ebpf_parse_cgroup_shm_data(); extern void ebpf_close_cgroup_shm(); -extern void ebpf_clean_cgroup_pids(); extern void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order, char *algorithm, char *context, char *module, int update_every); diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index 619d8520..8cf063ca 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -10,14 +10,9 @@ static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTA netdata_dcstat_pid_t *dcstat_vector = NULL; netdata_publish_dcstat_t **dcstat_pid = NULL; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END]; static netdata_idx_t *dcstat_values = NULL; -static int read_thread_closed = 1; - struct config dcstat_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, @@ -27,6 +22,7 @@ struct config dcstat_config = { .first_section = NULL, struct netdata_static_thread dcstat_threads = {"DCSTAT KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; +static enum ebpf_threads_status ebpf_dcstat_exited = NETDATA_THREAD_EBPF_RUNNING; static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, .user_input = 0, .type = 
NETDATA_EBPF_MAP_STATIC, @@ -239,8 +235,8 @@ static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em) */ void dcstat_update_publish(netdata_publish_dcstat_t *out, uint64_t cache_access, uint64_t not_found) { - calculated_number successful_access = (calculated_number) (((long long)cache_access) - ((long long)not_found)); - calculated_number ratio = (cache_access) ? successful_access/(calculated_number)cache_access : 0; + NETDATA_DOUBLE successful_access = (NETDATA_DOUBLE) (((long long)cache_access) - ((long long)not_found)); + NETDATA_DOUBLE ratio = (cache_access) ? successful_access/(NETDATA_DOUBLE)cache_access : 0; out->ratio = (long long )(ratio*100); } @@ -252,20 +248,6 @@ void dcstat_update_publish(netdata_publish_dcstat_t *out, uint64_t cache_access, *****************************************************************/ /** - * Clean PID structures - * - * Clean the allocated structures. - */ -void clean_dcstat_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(dcstat_pid[pids->pid]); - - pids = pids->next; - } -} - -/** * Clean names * * Clean the optional names allocated during startup. @@ -280,6 +262,24 @@ void ebpf_dcstat_clean_names() } /** + * DCstat exit + * + * Cancel child and exit. + * + * @param ptr thread data. + */ +static void ebpf_dcstat_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; + } + + ebpf_dcstat_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** * Clean up the main thread. * * @param ptr thread data. @@ -287,37 +287,24 @@ void ebpf_dcstat_clean_names() static void ebpf_dcstat_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_dcstat_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - freez(dcstat_vector); freez(dcstat_values); + freez(dcstat_threads.thread); ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated); ebpf_dcstat_clean_names(); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } #ifdef LIBBPF_MAJOR_VERSION - else if (bpf_obj) + if (bpf_obj) dc_bpf__destroy(bpf_obj); #endif + + dcstat_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -371,6 +358,8 @@ void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr) 20103, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /***************************************************************** @@ -535,22 +524,25 @@ static void read_global_table() */ void *ebpf_dcstat_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_dcstat_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_dcstat_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_dcstat_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + 
ebpf_dcstat_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -881,7 +873,7 @@ static int ebpf_send_systemd_dc_charts() for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -1013,40 +1005,40 @@ static void dcstat_collector(ebpf_module_t *em) dcstat_threads.thread = mallocz(sizeof(netdata_thread_t)); dcstat_threads.start_routine = ebpf_dcstat_read_hash; - netdata_thread_create(dcstat_threads.thread, dcstat_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(dcstat_threads.thread, dcstat_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_dcstat_read_hash, em); netdata_publish_dcstat_t publish; memset(&publish, 0, sizeof(publish)); - int apps = em->apps_charts; int cgroups = em->cgroup_charts; int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - if (apps) - read_apps_table(); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (cgroups) - ebpf_update_dc_cgroup(); + netdata_apps_integration_flags_t apps = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); + if (apps) + read_apps_table(); - pthread_mutex_lock(&lock); + if (cgroups) + ebpf_update_dc_cgroup(); - dcstat_send_global(&publish); + pthread_mutex_lock(&lock); - if (apps) - ebpf_dcache_send_apps_data(apps_groups_root_target); + dcstat_send_global(&publish); - if (cgroups) - ebpf_dc_send_cgroup_data(update_every); + if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_dcache_send_apps_data(apps_groups_root_target); - pthread_mutex_unlock(&lock); - } + if (cgroups) + ebpf_dc_send_cgroup_data(update_every); + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -1125,8 +1117,8 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em) { int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { ret = -1; } } @@ -1157,7 +1149,7 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em) */ void *ebpf_dcstat_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_dcstat_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_dcstat_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = dcstat_maps; diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h index 94086473..5c4a80cd 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.h +++ b/collectors/ebpf.plugin/ebpf_dcstat.h @@ -77,7 +77,6 @@ typedef struct netdata_publish_dcstat { extern void *ebpf_dcstat_thread(void *ptr); extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void clean_dcstat_pid_structures(); extern struct config dcstat_config; extern netdata_ebpf_targets_t dc_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c index 3ddf50b9..96b1705c 100644 --- a/collectors/ebpf.plugin/ebpf_disk.c +++ 
b/collectors/ebpf.plugin/ebpf_disk.c @@ -25,9 +25,6 @@ char *tracepoint_block_type = { "block"} ; char *tracepoint_block_issue = { "block_rq_issue" }; char *tracepoint_block_rq_complete = { "block_rq_complete" }; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - static int was_block_issue_enabled = 0; static int was_block_rq_complete_enabled = 0; @@ -35,12 +32,11 @@ static char **dimensions = NULL; static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; -static int read_thread_closed = 1; - static netdata_idx_t *disk_hash_values = NULL; static struct netdata_static_thread disk_threads = {"DISK KERNEL", NULL, NULL, 1, NULL, NULL, NULL }; +static enum ebpf_threads_status ebpf_disk_exited = NETDATA_THREAD_EBPF_RUNNING; ebpf_publish_disk_t *plot_disks = NULL; pthread_mutex_t plot_mutex; @@ -428,25 +424,37 @@ static void ebpf_cleanup_disk_list() } /** - * Clean up the main thread. + * Disk exit. + * + * Cancel child and exit. * * @param ptr thread data. */ -static void ebpf_disk_cleanup(void *ptr) +static void ebpf_disk_exit(void *ptr) { - ebpf_disk_disable_tracepoints(); + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; + } + ebpf_disk_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Disk Cleanup + * + * Clean up allocated memory. + * + * @param ptr thread data. + */ +static void ebpf_disk_cleanup(void *ptr) +{ ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_disk_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } + ebpf_disk_disable_tracepoints(); if (dimensions) ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); @@ -458,15 +466,8 @@ static void ebpf_disk_cleanup(void *ptr) ebpf_cleanup_plot_disks(); ebpf_cleanup_disk_list(); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + disk_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -582,19 +583,25 @@ static void read_hard_disk_tables(int table) */ void *ebpf_disk_read_hash(void *ptr) { + netdata_thread_cleanup_push(ebpf_disk_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_DISK_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_disk_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_disk_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd); } + ebpf_disk_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -724,30 +731,26 @@ static void disk_collector(ebpf_module_t *em) disk_threads.thread = mallocz(sizeof(netdata_thread_t)); disk_threads.start_routine = ebpf_disk_read_hash; - netdata_thread_create(disk_threads.thread, disk_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(disk_threads.thread, disk_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_disk_read_hash, em); int update_every = em->update_every; - 
int counter = update_every - 1; - read_thread_closed = 0; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); - ebpf_remove_pointer_from_plot_disk(em); - ebpf_latency_send_hd_data(update_every); - - pthread_mutex_unlock(&lock); - } + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - pthread_mutex_unlock(&collect_data_mutex); + pthread_mutex_lock(&lock); + ebpf_remove_pointer_from_plot_disk(em); + ebpf_latency_send_hd_data(update_every); + + pthread_mutex_unlock(&lock); ebpf_update_disks(em); } - read_thread_closed = 1; } /***************************************************************** @@ -797,7 +800,7 @@ static int ebpf_disk_enable_tracepoints() */ void *ebpf_disk_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_disk_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_disk_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = disk_maps; @@ -822,8 +825,8 @@ void *ebpf_disk_thread(void *ptr) goto enddisk; } - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = 0; goto enddisk; } diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c index 10a50c4e..b4e577da 100644 --- a/collectors/ebpf.plugin/ebpf_fd.c +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -29,12 +29,9 @@ struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - struct netdata_static_thread fd_thread = {"FD KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; -static int read_thread_closed = 1; +static enum ebpf_threads_status ebpf_fd_exited = NETDATA_THREAD_EBPF_RUNNING; static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER]; static netdata_idx_t *fd_values = NULL; @@ -48,17 +45,21 @@ netdata_fd_stat_t **fd_pid = NULL; *****************************************************************/ /** - * Clean PID structures + * FD Exit + * + * Cancel child thread and exit. * - * Clean the allocated structures. + * @param ptr thread data. 
*/ -void clean_fd_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(fd_pid[pids->pid]); - - pids = pids->next; +static void ebpf_fd_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; } + + ebpf_fd_exited = NETDATA_THREAD_EBPF_STOPPING; } /** @@ -69,31 +70,16 @@ void clean_fd_pid_structures() { static void ebpf_fd_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_fd_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - ebpf_cleanup_publish_syscall(fd_publish_aggregated); freez(fd_thread.thread); freez(fd_values); freez(fd_vector); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + fd_thread.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -161,21 +147,24 @@ static void read_global_table() */ void *ebpf_fd_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_fd_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_FD_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_fd_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_fd_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_fd_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -572,7 +561,7 @@ static int ebpf_send_systemd_fd_charts(ebpf_module_t *em) for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -665,38 +654,37 @@ static void fd_collector(ebpf_module_t *em) fd_thread.thread = mallocz(sizeof(netdata_thread_t)); fd_thread.start_routine = ebpf_fd_read_hash; - netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_fd_read_hash, em); - int apps = em->apps_charts; int cgroups = em->cgroup_charts; - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (++counter == update_every) { - counter = 0; - if (apps) - read_apps_table(); + netdata_apps_integration_flags_t apps = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); + if (apps) + read_apps_table(); - if (cgroups) - ebpf_update_fd_cgroup(); + if (cgroups) + ebpf_update_fd_cgroup(); - pthread_mutex_lock(&lock); + pthread_mutex_lock(&lock); - ebpf_fd_send_data(em); + ebpf_fd_send_data(em); - if (apps) - ebpf_fd_send_apps_data(em, apps_groups_root_target); + if (apps & 
NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_fd_send_apps_data(em, apps_groups_root_target); - if (cgroups) - ebpf_fd_send_cgroup_data(em); - - pthread_mutex_unlock(&lock); - } + if (cgroups) + ebpf_fd_send_cgroup_data(em); + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -756,6 +744,8 @@ void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr) ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); } + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -831,7 +821,7 @@ static void ebpf_fd_allocate_global_vectors(int apps) */ void *ebpf_fd_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_fd_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_fd_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = fd_maps; @@ -841,8 +831,8 @@ void *ebpf_fd_thread(void *ptr) ebpf_fd_allocate_global_vectors(em->apps_charts); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endfd; } diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h index 851e040e..8742558d 100644 --- a/collectors/ebpf.plugin/ebpf_fd.h +++ b/collectors/ebpf.plugin/ebpf_fd.h @@ -79,7 +79,6 @@ extern void *ebpf_fd_thread(void *ptr); extern void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr); extern struct config fd_config; extern netdata_fd_stat_t **fd_pid; -extern void clean_fd_pid_structures(); #endif /* NETDATA_EBPF_FD_H */ diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c index 415a42db..bc767fbc 100644 --- a/collectors/ebpf.plugin/ebpf_filesystem.c +++ b/collectors/ebpf.plugin/ebpf_filesystem.c @@ -30,67 +30,11 @@ static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NET .type = NETDATA_EBPF_MAP_CONTROLLER, .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; -ebpf_filesystem_partitions_t localfs[] = - {{.filesystem = "ext4", - .optional_filesystem = NULL, - .family = "ext4", - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = NULL, .addr = 0}, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, - {.filesystem = "xfs", - .optional_filesystem = NULL, - .family = "xfs", - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = NULL, .addr = 0}, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, - {.filesystem = "nfs", - .optional_filesystem = "nfs4", - .family = "nfs", - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_ATTR_CHARTS, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = NULL, .addr = 0}, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, - {.filesystem = "zfs", - .optional_filesystem = NULL, - .family = "zfs", - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = NULL, .addr = 0}, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4}, - {.filesystem = "btrfs", - .optional_filesystem = NULL, - .family = 
"btrfs", - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = "btrfs_file_operations", .addr = 0}, - .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10}, - {.filesystem = NULL, - .optional_filesystem = NULL, - .family = NULL, - .objects = NULL, - .probe_links = NULL, - .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, - .enabled = CONFIG_BOOLEAN_YES, - .addresses = {.function = NULL, .addr = 0}, - .kernels = 0}}; - struct netdata_static_thread filesystem_threads = {"EBPF FS READ", NULL, NULL, 1, NULL, NULL, NULL }; +static enum ebpf_threads_status ebpf_fs_exited = NETDATA_THREAD_EBPF_RUNNING; -static int read_thread_closed = 1; static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; @@ -381,30 +325,44 @@ void ebpf_filesystem_cleanup_ebpf_data() bpf_link__destroy(probe_links[j]); j++; } - bpf_object__close(efp->objects); + freez(probe_links); + if (efp->objects) + bpf_object__close(efp->objects); } } } /** - * Clean up the main thread. + * Filesystem exit + * + * Cancel child thread. * * @param ptr thread data. */ -static void ebpf_filesystem_cleanup(void *ptr) +static void ebpf_filesystem_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; return; - - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2*USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); } + ebpf_fs_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * File system cleanup + * + * Clean up allocated thread. + * + * @param ptr thread data. 
+ */ +static void ebpf_filesystem_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_fs_exited != NETDATA_THREAD_EBPF_STOPPED) + return; + freez(filesystem_threads.thread); ebpf_cleanup_publish_syscall(filesystem_publish_aggregated); @@ -412,6 +370,9 @@ static void ebpf_filesystem_cleanup(void *ptr) if (dimensions) ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); freez(filesystem_hash_values); + + filesystem_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -515,16 +476,18 @@ static void read_filesystem_tables() */ void *ebpf_filesystem_read_hash(void *ptr) { + netdata_thread_cleanup_push(ebpf_filesystem_cleanup, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; - read_thread_closed = 0; heartbeat_t hb; heartbeat_init(&hb); usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_every; int update_every = em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_fs_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_fs_exited == NETDATA_THREAD_EBPF_STOPPING) + break; (void) ebpf_update_partitions(em); ebpf_obsolete_fs_charts(update_every); @@ -536,7 +499,9 @@ void *ebpf_filesystem_read_hash(void *ptr) read_filesystem_tables(); } - read_thread_closed = 1; + ebpf_fs_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -578,25 +543,23 @@ static void filesystem_collector(ebpf_module_t *em) filesystem_threads.start_routine = ebpf_filesystem_read_hash; netdata_thread_create(filesystem_threads.thread, filesystem_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, ebpf_filesystem_read_hash, em); + NETDATA_THREAD_OPTION_DEFAULT, ebpf_filesystem_read_hash, em); int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin || em->optional) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - ebpf_create_fs_charts(update_every); - ebpf_histogram_send_data(); + pthread_mutex_lock(&lock); - pthread_mutex_unlock(&lock); - } + ebpf_create_fs_charts(update_every); + ebpf_histogram_send_data(); - pthread_mutex_unlock(&collect_data_mutex); + pthread_mutex_unlock(&lock); } } @@ -634,7 +597,7 @@ static void ebpf_update_filesystem() */ void *ebpf_filesystem_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_filesystem_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_filesystem_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = fs_maps; diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h index 8b7c54c5..f6a10c87 100644 --- a/collectors/ebpf.plugin/ebpf_filesystem.h +++ b/collectors/ebpf.plugin/ebpf_filesystem.h @@ -43,26 +43,6 @@ enum netdata_filesystem_table { NETDATA_ADDR_FS_TABLE }; -typedef struct ebpf_filesystem_partitions { - char *filesystem; - char *optional_filesystem; - char *family; - char *family_name; - struct bpf_object *objects; - struct bpf_link **probe_links; - - netdata_ebpf_histogram_t hread; - netdata_ebpf_histogram_t hwrite; - netdata_ebpf_histogram_t hopen; - netdata_ebpf_histogram_t hadditional; - - uint32_t flags; - 
uint32_t enabled; - - ebpf_addresses_t addresses; - uint64_t kernels; -} ebpf_filesystem_partitions_t; - extern void *ebpf_filesystem_thread(void *ptr); extern struct config fs_config; diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c index 25b2a0ec..41a88164 100644 --- a/collectors/ebpf.plugin/ebpf_hardirq.c +++ b/collectors/ebpf.plugin/ebpf_hardirq.c @@ -125,11 +125,6 @@ static hardirq_static_val_t hardirq_static_vals[] = { }, }; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - -static int read_thread_closed = 1; - // store for "published" data from the reader thread, which the collector // thread will write to netdata agent. static avl_tree_lock hardirq_pub; @@ -143,44 +138,49 @@ static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL; static struct netdata_static_thread hardirq_threads = {"HARDIRQ KERNEL", NULL, NULL, 1, NULL, NULL, NULL }; +static enum ebpf_threads_status ebpf_hardirq_exited = NETDATA_THREAD_EBPF_RUNNING; /** - * Clean up the main thread. + * Hardirq Exit + * + * Cancel child and exit. * * @param ptr thread data. */ -static void hardirq_cleanup(void *ptr) +static void hardirq_exit(void *ptr) { - for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) { - ebpf_disable_tracepoint(&hardirq_tracepoints[i]); - } - ebpf_module_t *em = (ebpf_module_t *)ptr; if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; return; } - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 1 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } + ebpf_hardirq_exited = NETDATA_THREAD_EBPF_STOPPING; +} +/** + * Hardirq clean up + * + * Clean up allocated memory. + * + * @param ptr thread data. 
+ */ +static void hardirq_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + + if (ebpf_hardirq_exited != NETDATA_THREAD_EBPF_STOPPED) + return; + + freez(hardirq_threads.thread); + for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&hardirq_tracepoints[i]); + } freez(hardirq_ebpf_vals); freez(hardirq_ebpf_static_vals); - freez(hardirq_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + hardirq_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -316,23 +316,25 @@ static void hardirq_read_latency_static_map(int mapfd) */ static void *hardirq_reader(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(hardirq_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_HARDIRQ_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_hardirq_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); UNUSED(dt); + if (ebpf_hardirq_exited == NETDATA_THREAD_EBPF_STOPPING) + break; hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd); hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd); } + ebpf_hardirq_exited = NETDATA_THREAD_EBPF_STOPPED; - read_thread_closed = 1; + netdata_thread_cleanup_pop(1); return NULL; } @@ -419,7 +421,7 @@ static void hardirq_collector(ebpf_module_t *em) netdata_thread_create( hardirq_threads.thread, hardirq_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, + NETDATA_THREAD_OPTION_DEFAULT, hardirq_reader, em ); @@ -432,26 +434,24 @@ static void hardirq_collector(ebpf_module_t *em) pthread_mutex_unlock(&lock); // loop and read from published data until ebpf plugin is closed. - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); - - // write dims now for all hitherto discovered IRQs. - write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency"); - avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL); - hardirq_write_static_dims(); - write_end_chart(); - - pthread_mutex_unlock(&lock); - } - - pthread_mutex_unlock(&collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + //This will be cancelled by its parent + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + + pthread_mutex_lock(&lock); + + // write dims now for all hitherto discovered IRQs. 
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency"); + avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL); + hardirq_write_static_dims(); + write_end_chart(); + + pthread_mutex_unlock(&lock); } } @@ -467,7 +467,7 @@ static void hardirq_collector(ebpf_module_t *em) */ void *ebpf_hardirq_thread(void *ptr) { - netdata_thread_cleanup_push(hardirq_cleanup, ptr); + netdata_thread_cleanup_push(hardirq_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = hardirq_maps; @@ -481,8 +481,8 @@ void *ebpf_hardirq_thread(void *ptr) goto endhardirq; } - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endhardirq; } diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c index 9f75543d..4dca0450 100644 --- a/collectors/ebpf.plugin/ebpf_mdflush.c +++ b/collectors/ebpf.plugin/ebpf_mdflush.c @@ -35,47 +35,47 @@ static avl_tree_lock mdflush_pub; // tmp store for mdflush values we get from a per-CPU eBPF map. static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - -static int read_thread_closed = 1; - static struct netdata_static_thread mdflush_threads = {"MDFLUSH KERNEL", NULL, NULL, 1, NULL, NULL, NULL }; +static enum ebpf_threads_status ebpf_mdflush_exited = NETDATA_THREAD_EBPF_RUNNING; /** - * Clean up the main thread. + * MDflush exit + * + * Cancel thread and exit. * * @param ptr thread data. */ -static void mdflush_cleanup(void *ptr) +static void mdflush_exit(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; return; } - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 1 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } + ebpf_mdflush_exited = NETDATA_THREAD_EBPF_STOPPING; +}
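Every module touched by this commit installs the same shutdown handshake, so it is worth spelling out once: the exit callback that the module's main thread pushes only raises a flag; the detached reader thread observes the flag, marks itself stopped, and the cleanup handler the reader pushed for itself is what actually frees memory. A condensed sketch of the pattern, with hypothetical module_* names standing in for the per-module variants in this diff:

    // Sketch only. The enum values and the netdata_thread_* wrappers are the
    // ones visible in this diff; the module_* names are placeholders.
    enum ebpf_threads_status {
        NETDATA_THREAD_EBPF_RUNNING,  // reader thread is collecting
        NETDATA_THREAD_EBPF_STOPPING, // main thread requested shutdown
        NETDATA_THREAD_EBPF_STOPPED   // reader finished; cleanup may run
    };

    static enum ebpf_threads_status module_exited = NETDATA_THREAD_EBPF_RUNNING;

    static void module_exit(void *ptr)    // pushed by the module's main thread
    {
        ebpf_module_t *em = (ebpf_module_t *)ptr;
        if (!em->enabled) {
            em->enabled = NETDATA_MAIN_THREAD_EXITED;
            return;
        }
        module_exited = NETDATA_THREAD_EBPF_STOPPING;  // flag only, free nothing
    }

    static void module_cleanup(void *ptr) // pushed by the reader thread
    {
        ebpf_module_t *em = (ebpf_module_t *)ptr;
        if (module_exited != NETDATA_THREAD_EBPF_STOPPED)
            return;                       // reader still running; do not free
        // freez() the module's buffers here
        em->enabled = NETDATA_MAIN_THREAD_EXITED;
    }

    static void *module_reader(void *ptr) // detached reader thread
    {
        netdata_thread_cleanup_push(module_cleanup, ptr);
        ebpf_module_t *em = (ebpf_module_t *)ptr;
        heartbeat_t hb;
        heartbeat_init(&hb);
        usec_t step = em->update_every * USEC_PER_SEC;
        while (module_exited == NETDATA_THREAD_EBPF_RUNNING) {
            (void)heartbeat_next(&hb, step);
            if (module_exited == NETDATA_THREAD_EBPF_STOPPING)
                break;
            // read the eBPF maps here
        }
        module_exited = NETDATA_THREAD_EBPF_STOPPED;  // allow cleanup to run
        netdata_thread_cleanup_pop(1);                // runs module_cleanup
        return NULL;
    }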
+ +/** + * Cleanup + * + * Clean allocated memory. + * + * @param ptr thread data. + */ +static void mdflush_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_mdflush_exited != NETDATA_THREAD_EBPF_STOPPED) + return; freez(mdflush_ebpf_vals); freez(mdflush_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + mdflush_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /** @@ -175,22 +175,25 @@ static void mdflush_read_count_map() */ static void *mdflush_reader(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(mdflush_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_MDFLUSH_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_mdflush_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); UNUSED(dt); + if (ebpf_mdflush_exited == NETDATA_THREAD_EBPF_STOPPING) + break; mdflush_read_count_map(); } - read_thread_closed = 1; + ebpf_mdflush_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -248,7 +251,7 @@ static void mdflush_collector(ebpf_module_t *em) netdata_thread_create( mdflush_threads.thread, mdflush_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, + NETDATA_THREAD_OPTION_DEFAULT, mdflush_reader, em ); @@ -260,25 +263,21 @@ static void mdflush_collector(ebpf_module_t *em) pthread_mutex_unlock(&lock); // loop and read from published data until ebpf plugin is closed. - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); - - // write dims now for all hitherto discovered devices. - write_begin_chart("mdstat", "mdstat_flush"); - avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL); - write_end_chart(); - - pthread_mutex_unlock(&lock); - } - - pthread_mutex_unlock(&collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + + // write dims now for all hitherto discovered devices. 
+ write_begin_chart("mdstat", "mdstat_flush"); + avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL); + write_end_chart(); + + pthread_mutex_unlock(&lock); } } @@ -290,7 +288,7 @@ static void mdflush_collector(ebpf_module_t *em) */ void *ebpf_mdflush_thread(void *ptr) { - netdata_thread_cleanup_push(mdflush_cleanup, ptr); + netdata_thread_cleanup_push(mdflush_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = mdflush_maps; @@ -306,8 +304,8 @@ void *ebpf_mdflush_thread(void *ptr) goto endmdflush; } - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endmdflush; } diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c index 1ba1e135..bca467bc 100644 --- a/collectors/ebpf.plugin/ebpf_mount.c +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -18,12 +18,8 @@ struct config mount_config = { .first_section = NULL, .last_section = NULL, .mut .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -static int read_thread_closed = 1; static netdata_idx_t *mount_values = NULL; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END]; struct netdata_static_thread mount_thread = {"MOUNT KERNEL", @@ -33,6 +29,7 @@ struct netdata_static_thread mount_thread = {"MOUNT KERNEL", netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "umount", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; +static enum ebpf_threads_status ebpf_mount_exited = NETDATA_THREAD_EBPF_RUNNING; #ifdef LIBBPF_MAJOR_VERSION #include "includes/mount.skel.h" // BTF code @@ -227,33 +224,46 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_ *****************************************************************/ /** - * Clean up the main thread. + * Mount Exit + * + * Cancel child thread. + * + * @param ptr thread data. + */ +static void ebpf_mount_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; + } + + ebpf_mount_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Mount cleanup + * + * Clean up allocated memory. * * @param ptr thread data. 
*/ static void ebpf_mount_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_mount_exited != NETDATA_THREAD_EBPF_STOPPED) return; freez(mount_thread.thread); freez(mount_values); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } #ifdef LIBBPF_MAJOR_VERSION - else if (bpf_obj) + if (bpf_obj) mount_bpf__destroy(bpf_obj); #endif + mount_thread.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -299,22 +309,26 @@ static void read_global_table() */ void *ebpf_mount_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_mount_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_MOUNT_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + //This will be cancelled by its parent + while (ebpf_mount_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_mount_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_mount_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -348,25 +362,22 @@ static void mount_collector(ebpf_module_t *em) mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); - netdata_thread_create(mount_thread.thread, mount_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(mount_thread.thread, mount_thread.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_mount_read_hash, em); - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - ebpf_mount_send_data(); + pthread_mutex_lock(&lock); - pthread_mutex_unlock(&lock); - } + ebpf_mount_send_data(); - pthread_mutex_unlock(&collect_data_mutex); + pthread_mutex_unlock(&lock); } } @@ -425,8 +436,8 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em) { int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; ret = -1; } @@ -458,7 +469,7 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em) */ void *ebpf_mount_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_mount_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_mount_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = mount_maps; diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c index 463a3290..33f505b0 100644 --- a/collectors/ebpf.plugin/ebpf_oomkill.c +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -34,9 +34,6 @@ static ebpf_tracepoint_t oomkill_tracepoints[] = { {.enabled = false, .class = NULL, .event = NULL} }; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - 
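The probe_links/objects statics removed here, like their twins in every other module file in this commit, become fields of ebpf_module_t, so the legacy probes can be detached in one generic place instead of one copy-pasted loop per module. The deleted loops all had the same shape, so the consolidated teardown presumably looks roughly like the following; the helper name is illustrative, not taken from this diff, while the body mirrors the removed code:

    // Hypothetical consolidated form of the per-module cleanup loops this
    // commit deletes; the bpf_* calls are standard libbpf.
    static void ebpf_unload_legacy_code(struct bpf_object *objects,
                                        struct bpf_link **probe_links)
    {
        if (!probe_links || !objects)
            return;

        struct bpf_program *prog;
        size_t i = 0;
        bpf_object__for_each_program(prog, objects) {
            bpf_link__destroy(probe_links[i]); // detach kprobe/tracepoint/trampoline
            i++;
        }
        freez(probe_links);
        bpf_object__close(objects);            // unload the program and its maps
    }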
static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill", .dimension = "oomkill", .algorithm = "absolute", .next = NULL}; @@ -49,19 +46,8 @@ static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill" static void oomkill_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { - return; - } - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + em->enabled = NETDATA_MAIN_THREAD_EXITED; } static void oomkill_write_data(int32_t *keys, uint32_t total) @@ -159,7 +145,7 @@ static int ebpf_send_systemd_oomkill_charts() if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->oomkill); ect->oomkill = 0; - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -312,34 +298,36 @@ static void oomkill_collector(ebpf_module_t *em) { int cgroups = em->cgroup_charts; int update_every = em->update_every; - int counter = update_every - 1; int32_t keys[NETDATA_OOMKILL_MAX_ENTRIES]; memset(keys, 0, sizeof(keys)); // loop and read until ebpf plugin is closed. - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); + pthread_mutex_lock(&collect_data_mutex); + pthread_mutex_lock(&lock); - uint32_t count = oomkill_read_data(keys); - if (cgroups && count) - ebpf_update_oomkill_cgroup(keys, count); + uint32_t count = oomkill_read_data(keys); + if (cgroups && count) + ebpf_update_oomkill_cgroup(keys, count); - // write everything from the ebpf map. - if (cgroups) - ebpf_oomkill_send_cgroup_data(update_every); + // write everything from the ebpf map. 
+ if (cgroups) + ebpf_oomkill_send_cgroup_data(update_every); + if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) { write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART); oomkill_write_data(keys, count); write_end_chart(); - - pthread_mutex_unlock(&lock); } + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -362,6 +350,8 @@ void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr) 20020, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL); + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -395,8 +385,8 @@ void *ebpf_oomkill_thread(void *ptr) goto endoomkill; } - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endoomkill; } diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index f894f070..f6b379a5 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -47,9 +47,6 @@ ebpf_process_publish_apps_t **current_apps_data = NULL; int process_enabled = 0; -static struct bpf_object *objects = NULL; -static struct bpf_link **probe_links = NULL; - struct config process_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, @@ -58,6 +55,7 @@ struct config process_config = { .first_section = NULL, static struct netdata_static_thread cgroup_thread = {"EBPF CGROUP", NULL, NULL, 1, NULL, NULL, NULL}; +static enum ebpf_threads_status ebpf_process_exited = NETDATA_THREAD_EBPF_RUNNING; static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"}; static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"}; @@ -177,11 +175,9 @@ void ebpf_process_remove_pids() uint32_t pid = pids->pid; ebpf_process_stat_t *w = global_process_stats[pid]; if (w) { - if (w->removeme) { - freez(w); - global_process_stats[pid] = NULL; - bpf_map_delete_elem(pid_fd, &pid); - } + freez(w); + global_process_stats[pid] = NULL; + bpf_map_delete_elem(pid_fd, &pid); } pids = pids->next; @@ -568,6 +564,8 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr) root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); } + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -624,6 +622,72 @@ static void ebpf_create_apps_charts(struct target *root) /***************************************************************** * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/
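For context on what ebpf_disable_tracing_values() in the functions below undoes: enabling a tracepoint from user space amounts to writing 1 to that event's enable file under tracefs, and the plugin writes 0 back only for events it enabled itself (hence the was_*_enabled guards). A minimal illustration of the idea, assuming tracefs is mounted at its conventional path; this is not the plugin's actual helper:

    #include <stdio.h>

    // Illustration only: flip a tracepoint's 'enable' file under tracefs.
    // Assumes the debugfs mount point; newer kernels also expose the same
    // tree at /sys/kernel/tracing.
    static int tracepoint_set(const char *class, const char *event, int enabled)
    {
        char path[512];
        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/tracing/events/%s/%s/enable", class, event);

        FILE *fp = fopen(path, "w");
        if (!fp)
            return -1;

        fprintf(fp, "%d", enabled ? 1 : 0);
        fclose(fp);
        return 0;
    }

    // e.g. tracepoint_set("sched", "sched_process_exit", 0);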
+ +/** + * Process disable tracepoints + * + * Disable the tracepoints that the plugin was responsible for enabling. + */ +static void ebpf_process_disable_tracepoints() +{ + char *default_message = { "Cannot disable the tracepoint" }; + if (!was_sched_process_exit_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit); + } + + if (!was_sched_process_exec_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec); + } + + if (!was_sched_process_fork_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork); + } +} + +/** + * Process Exit + * + * Cancel child thread. + * + * @param ptr thread data. + */ +static void ebpf_process_exit(void *ptr) +{ + (void)ptr; + ebpf_process_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Process cleanup + * + * Clean up allocated memory. + * + * @param ptr thread data. + */ +static void ebpf_process_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_process_exited != NETDATA_THREAD_EBPF_STOPPED) + return; + + ebpf_cleanup_publish_syscall(process_publish_aggregated); + freez(process_hash_values); + freez(cgroup_thread.thread); + + ebpf_process_disable_tracepoints(); + + cgroup_thread.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +/***************************************************************** + * * FUNCTIONS WITH THE MAIN LOOP * *****************************************************************/ @@ -640,24 +704,33 @@ static void ebpf_create_apps_charts(struct target *root) */ void *ebpf_cgroup_update_shm(void *ptr) { - UNUSED(ptr); + netdata_thread_cleanup_push(ebpf_process_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); - usec_t step = 30 * USEC_PER_SEC; - while (!close_ebpf_plugin) { + usec_t step = 3 * USEC_PER_SEC; + int counter = NETDATA_EBPF_CGROUP_UPDATE - 1; + // This will be cancelled by its parent + while (ebpf_process_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; - - if (close_ebpf_plugin) + if (ebpf_process_exited == NETDATA_THREAD_EBPF_STOPPING) break; - if (!shm_ebpf_cgroup.header) - ebpf_map_cgroup_shared_memory(); + // We use a small heartbeat interval so the thread wakes up promptly, + // but the shared memory data should not be updated that frequently + if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) { + counter = 0; + if (!shm_ebpf_cgroup.header) + ebpf_map_cgroup_shared_memory(); - ebpf_parse_cgroup_shm_data(); + ebpf_parse_cgroup_shm_data(); + } } + ebpf_process_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -884,7 +957,7 @@ static int ebpf_send_systemd_process_charts(ebpf_module_t *em) for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -1019,32 +1092,36 @@ static void process_collector(ebpf_module_t *em) cgroup_thread.thread = mallocz(sizeof(netdata_thread_t)); cgroup_thread.start_routine = ebpf_cgroup_update_shm; - netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, 
NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_update_shm, em); heartbeat_t hb; heartbeat_init(&hb); int publish_global = em->global_charts; - int apps_enabled = em->apps_charts; int cgroups = em->cgroup_charts; int thread_enabled = em->enabled; if (cgroups) ebpf_process_update_cgroup_algorithm(); + int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT; int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; int update_every = em->update_every; int counter = update_every - 1; - while (!close_ebpf_plugin) { + int update_apps_list = update_apps_every - 1; + while (!ebpf_exit_plugin) { usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); (void)dt; + if (ebpf_exit_plugin) + break; pthread_mutex_lock(&collect_data_mutex); - cleanup_exited_pids(); - collect_data_for_all_processes(pid_fd); + if (++update_apps_list == update_apps_every) { + update_apps_list = 0; + cleanup_exited_pids(); + collect_data_for_all_processes(pid_fd); - ebpf_create_apps_charts(apps_groups_root_target); - - pthread_cond_broadcast(&collect_data_cond_var); + ebpf_create_apps_charts(apps_groups_root_target); + } pthread_mutex_unlock(&collect_data_mutex); if (++counter == update_every) { @@ -1052,10 +1129,10 @@ static void process_collector(ebpf_module_t *em) read_hash_global_tables(); - int publish_apps = 0; + netdata_apps_integration_flags_t apps_enabled = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); if (all_pids_count > 0) { if (apps_enabled) { - publish_apps = 1; ebpf_process_update_apps_data(); } @@ -1072,7 +1149,7 @@ static void process_collector(ebpf_module_t *em) ebpf_process_send_data(em); } - if (publish_apps) { + if (apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) { ebpf_process_send_apps_data(apps_groups_root_target, em); } @@ -1081,6 +1158,7 @@ static void process_collector(ebpf_module_t *em) } } pthread_mutex_unlock(&lock); + pthread_mutex_unlock(&collect_data_mutex); } fflush(stdout); @@ -1089,89 +1167,6 @@ static void process_collector(ebpf_module_t *em) /***************************************************************** * - * FUNCTIONS TO CLOSE THE THREAD - * - *****************************************************************/ - -void clean_global_memory() { - int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; - struct pid_stat *pids = root_of_pids; - while (pids) { - uint32_t pid = pids->pid; - freez(global_process_stats[pid]); - - bpf_map_delete_elem(pid_fd, &pid); - freez(current_apps_data[pid]); - - pids = pids->next; - } -} - -/** - * Process disable tracepoints - * - * Disable tracepoints when the plugin was responsible to enable it. - */ -static void ebpf_process_disable_tracepoints() -{ - char *default_message = { "Cannot disable the tracepoint" }; - if (!was_sched_process_exit_enabled) { - if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit); - } - - if (!was_sched_process_exec_enabled) { - if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec); - } - - if (!was_sched_process_fork_enabled) { - if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork)) - error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork); - } -} - -/** - * Clean up the main thread. - * - * @param ptr thread data. 
- */ -static void ebpf_process_cleanup(void *ptr) -{ - UNUSED(ptr); - - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 1 * USEC_PER_SEC; - while (!finalized_threads) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - - ebpf_cleanup_publish_syscall(process_publish_aggregated); - freez(process_hash_values); - - clean_global_memory(); - freez(global_process_stats); - freez(current_apps_data); - - ebpf_process_disable_tracepoints(); - - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } - - freez(cgroup_thread.thread); -} - -/***************************************************************** - * * FUNCTIONS TO START THREAD * *****************************************************************/ @@ -1293,7 +1288,7 @@ static int ebpf_process_enable_tracepoints() */ void *ebpf_process_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_process_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_process_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = process_maps; @@ -1309,8 +1304,8 @@ void *ebpf_process_thread(void *ptr) ebpf_update_pid_table(&process_maps[0], em); set_local_pointers(); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; pthread_mutex_unlock(&lock); goto endprocess; diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h index b0377b5d..43df34d4 100644 --- a/collectors/ebpf.plugin/ebpf_process.h +++ b/collectors/ebpf.plugin/ebpf_process.h @@ -39,6 +39,8 @@ #define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit" #define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error" +#define NETDATA_EBPF_CGROUP_UPDATE 10 + // Statistical information enum netdata_ebpf_thread_stats{ NETDATA_EBPF_THREAD_STAT_TOTAL, diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c index 939741e7..bd928cbd 100644 --- a/collectors/ebpf.plugin/ebpf_shm.c +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -7,7 +7,6 @@ static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" }; static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END]; static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END]; -static int read_thread_closed = 1; netdata_publish_shm_t *shm_vector = NULL; static netdata_idx_t shm_hash_values[NETDATA_SHM_END]; @@ -35,11 +34,9 @@ static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = NULL, .internal_input = 0, .user_input = 0}}; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - struct netdata_static_thread shm_threads = {"SHM KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; +static enum ebpf_threads_status ebpf_shm_exited = NETDATA_THREAD_EBPF_RUNNING; netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "shmat", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -243,55 +240,48 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e *****************************************************************/ /** - * Clean shm structure + * SHM Exit + * + * Cancel child thread. + * + * @param ptr thread data. 
*/ -void clean_shm_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(shm_pid[pids->pid]); - - pids = pids->next; +static void ebpf_shm_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; } + + ebpf_shm_exited = NETDATA_THREAD_EBPF_STOPPING; } /** - * Clean up the main thread. + * SHM Cleanup + * + * Clean up allocated memory. * * @param ptr thread data. */ static void ebpf_shm_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) { + if (ebpf_shm_exited != NETDATA_THREAD_EBPF_STOPPED) return; - } - - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } ebpf_cleanup_publish_syscall(shm_publish_aggregated); freez(shm_vector); freez(shm_values); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } #ifdef LIBBPF_MAJOR_VERSION - else if (bpf_obj) + if (bpf_obj) shm_bpf__destroy(bpf_obj); #endif + + shm_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -467,21 +457,24 @@ static void read_global_table() */ void *ebpf_shm_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_shm_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SHM_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_shm_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_shm_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_shm_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -740,7 +733,7 @@ static int ebpf_send_systemd_shm_charts() for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_shm.get); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -859,44 +852,44 @@ static void shm_collector(ebpf_module_t *em) netdata_thread_create( shm_threads.thread, shm_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, + NETDATA_THREAD_OPTION_DEFAULT, ebpf_shm_read_hash, em ); - int apps = em->apps_charts; int cgroups = em->cgroup_charts; int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - if (apps) { - read_apps_table(); - } + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (cgroups) { - ebpf_update_shm_cgroup(); - } + netdata_apps_integration_flags_t apps = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); + if (apps) { + read_apps_table(); + } - pthread_mutex_lock(&lock); + if (cgroups) { + ebpf_update_shm_cgroup(); + } - shm_send_global(); + pthread_mutex_lock(&lock); - if (apps) { - ebpf_shm_send_apps_data(apps_groups_root_target); - } + shm_send_global(); - if (cgroups) { - 
ebpf_shm_send_cgroup_data(update_every); - } + if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) { + ebpf_shm_send_apps_data(apps_groups_root_target); + } - pthread_mutex_unlock(&lock); + if (cgroups) { + ebpf_shm_send_cgroup_data(update_every); } + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -950,6 +943,8 @@ void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr) 20194, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -1014,8 +1009,8 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em) { int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; ret = -1; } @@ -1045,7 +1040,7 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em) */ void *ebpf_shm_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_shm_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_shm_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = shm_maps; diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h index f0559e43..8e118a6f 100644 --- a/collectors/ebpf.plugin/ebpf_shm.h +++ b/collectors/ebpf.plugin/ebpf_shm.h @@ -56,7 +56,6 @@ extern netdata_publish_shm_t **shm_pid; extern void *ebpf_shm_thread(void *ptr); extern void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void clean_shm_pid_structures(); extern netdata_ebpf_targets_t shm_targets[]; extern struct config shm_config; diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index 7b2d4a5b..ba63934d 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -61,10 +61,8 @@ static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VE ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL; static ebpf_bandwidth_t *bandwidth_vector = NULL; -static int socket_apps_created = 0; pthread_mutex_t nv_mutex; int wait_to_plot = 0; -int read_thread_closed = 1; netdata_vector_plot_t inbound_vectors = { .plot = NULL, .next = 0, .last = 0 }; netdata_vector_plot_t outbound_vectors = { .plot = NULL, .next = 0, .last = 0 }; @@ -72,9 +70,6 @@ netdata_socket_t *socket_values; ebpf_network_viewer_port_list_t *listen_ports = NULL; -static struct bpf_object *objects = NULL; -static struct bpf_link **probe_links = NULL; - struct config socket_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, @@ -92,6 +87,11 @@ netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = {.name = "tcp_v6_connect", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; +struct netdata_static_thread socket_threads = {"EBPF SOCKET READ", + NULL, NULL, 1, NULL, + NULL, NULL }; +static enum ebpf_threads_status ebpf_socket_exited = NETDATA_THREAD_EBPF_RUNNING; + #ifdef LIBBPF_MAJOR_VERSION #include "includes/socket.skel.h" // BTF code @@ -424,6 +424,225 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul return ret; } #endif + +/***************************************************************** + * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/ + +/** + * Clean 
internal socket plot + * + * Clean all structures allocated with strdupz. + * + * @param ptr the pointer with addresses to clean. + */ +static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr) +{ + freez(ptr->dimension_recv); + freez(ptr->dimension_sent); + freez(ptr->resolved_name); + freez(ptr->dimension_retransmit); +} + +/** + * Clean socket plot + * + * Clean the allocated data for inbound and outbound vectors. + */ +static void clean_allocated_socket_plot() +{ + uint32_t i; + uint32_t end = inbound_vectors.last; + netdata_socket_plot_t *plot = inbound_vectors.plot; + for (i = 0; i < end; i++) { + clean_internal_socket_plot(&plot[i]); + } + + clean_internal_socket_plot(&plot[inbound_vectors.last]); + + end = outbound_vectors.last; + plot = outbound_vectors.plot; + for (i = 0; i < end; i++) { + clean_internal_socket_plot(&plot[i]); + } + clean_internal_socket_plot(&plot[outbound_vectors.last]); +} + +/** + * Clean network ports allocated during initialization. + * + * @param ptr a pointer to the link list. + */ +static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr) +{ + if (unlikely(!ptr)) + return; + + while (ptr) { + ebpf_network_viewer_port_list_t *next = ptr->next; + freez(ptr->value); + freez(ptr); + ptr = next; + } +} + +/** + * Clean service names + * + * Clean the allocated link list that stores names. + * + * @param names the link list. + */ +static void clean_service_names(ebpf_network_viewer_dim_name_t *names) +{ + if (unlikely(!names)) + return; + + while (names) { + ebpf_network_viewer_dim_name_t *next = names->next; + freez(names->name); + freez(names); + names = next; + } +} + +/** + * Clean hostnames + * + * @param hostnames the hostnames to clean + */ +static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames) +{ + if (unlikely(!hostnames)) + return; + + while (hostnames) { + ebpf_network_viewer_hostname_list_t *next = hostnames->next; + freez(hostnames->value); + simple_pattern_free(hostnames->value_pattern); + freez(hostnames); + hostnames = next; + } +} + +/** + * Cleanup publish syscall + * + * @param nps list of structures to clean + */ +void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps) +{ + while (nps) { + freez(nps->algorithm); + nps = nps->next; + } +} + +/** + * Clean port Structure + * + * Clean the allocated list. + * + * @param clean the list that will be cleaned + */ +void clean_port_structure(ebpf_network_viewer_port_list_t **clean) +{ + ebpf_network_viewer_port_list_t *move = *clean; + while (move) { + ebpf_network_viewer_port_list_t *next = move->next; + freez(move->value); + freez(move); + + move = next; + } + *clean = NULL; +} + +/** + * Clean IP structure + * + * Clean the allocated list. + * + * @param clean the list that will be cleaned + */ +static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) +{ + ebpf_network_viewer_ip_list_t *move = *clean; + while (move) { + ebpf_network_viewer_ip_list_t *next = move->next; + freez(move->value); + freez(move); + + move = next; + } + *clean = NULL; +} + +/** + * Socket exit + * + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void ebpf_socket_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; + } + + ebpf_socket_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Socket cleanup + * + * Clean up allocated addresses. + * + * @param ptr thread data. 
+ */ +void ebpf_socket_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_socket_exited != NETDATA_THREAD_EBPF_STOPPED) + return; + + ebpf_cleanup_publish_syscall(socket_publish_aggregated); + freez(socket_hash_values); + + freez(bandwidth_vector); + + freez(socket_values); + clean_allocated_socket_plot(); + freez(inbound_vectors.plot); + freez(outbound_vectors.plot); + + clean_port_structure(&listen_ports); + + ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0; + + clean_network_ports(network_viewer_opt.included_port); + clean_network_ports(network_viewer_opt.excluded_port); + clean_service_names(network_viewer_opt.names); + clean_hostnames(network_viewer_opt.included_hostnames); + clean_hostnames(network_viewer_opt.excluded_hostnames); + + pthread_mutex_destroy(&nv_mutex); + + freez(socket_threads.thread); + +#ifdef LIBBPF_MAJOR_VERSION + if (bpf_obj) + socket_bpf__destroy(bpf_obj); +#endif + socket_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; +} + /***************************************************************** * * PROCESS DATA AND SEND TO NETDATA @@ -737,8 +956,6 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root) { UNUSED(em); - if (!socket_apps_created) - return; struct target *w; collected_number value; @@ -1052,7 +1269,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - socket_apps_created = 1; + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -1814,15 +2031,6 @@ static void read_socket_hash_table(int fd, int family, int network_connection) key = next_key; } - - test = bpf_map_lookup_elem(fd, &next_key, values); - if (test < 0) { - return; - } - - if (network_connection) { - hash_accumulator(values, &next_key, family, end); - } } /** @@ -1929,18 +2137,20 @@ static void read_listen_table() */ void *ebpf_socket_read_hash(void *ptr) { + netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; - read_thread_closed = 0; heartbeat_t hb; heartbeat_init(&hb); usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_every; int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd; int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd; int network_connection = em->optional; - while (!close_ebpf_plugin) { + while (ebpf_socket_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_socket_exited == NETDATA_THREAD_EBPF_STOPPING) + break; pthread_mutex_lock(&nv_mutex); read_listen_table(); @@ -1950,7 +2160,9 @@ void *ebpf_socket_read_hash(void *ptr) pthread_mutex_unlock(&nv_mutex); } - read_thread_closed = 1; + ebpf_socket_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -2502,7 +2714,7 @@ static int ebpf_send_systemd_socket_charts() for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v4_connection); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -2643,10 +2855,6 @@ static void ebpf_socket_send_cgroup_data(int update_every) * *****************************************************************/ -struct netdata_static_thread socket_threads = {"EBPF SOCKET READ", - NULL, NULL, 1, NULL, - 
NULL, ebpf_socket_read_hash }; - /** * Main loop for this collector. * @@ -2655,297 +2863,74 @@ struct netdata_static_thread socket_threads = {"EBPF SOCKET READ", */ static void socket_collector(usec_t step, ebpf_module_t *em) { - UNUSED(step); heartbeat_t hb; heartbeat_init(&hb); socket_threads.thread = mallocz(sizeof(netdata_thread_t)); + socket_threads.start_routine = ebpf_socket_read_hash; netdata_thread_create(socket_threads.thread, socket_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, ebpf_socket_read_hash, em); + NETDATA_THREAD_OPTION_DEFAULT, ebpf_socket_read_hash, em); int cgroups = em->cgroup_charts; if (cgroups) ebpf_socket_update_cgroup_algorithm(); - int socket_apps_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts; - int socket_global_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].global_charts; + int socket_global_enabled = em->global_charts; int network_connection = em->optional; int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + + netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts; pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + if (socket_global_enabled) + read_hash_global_tables(); - if (++counter == update_every) { - counter = 0; - if (socket_global_enabled) - read_hash_global_tables(); + if (socket_apps_enabled) + ebpf_socket_update_apps_data(); - if (socket_apps_enabled) - ebpf_socket_update_apps_data(); + if (cgroups) + ebpf_update_socket_cgroup(); - if (cgroups) - ebpf_update_socket_cgroup(); + calculate_nv_plot(); - calculate_nv_plot(); + pthread_mutex_lock(&lock); + if (socket_global_enabled) + ebpf_socket_send_data(em); - pthread_mutex_lock(&lock); - if (socket_global_enabled) - ebpf_socket_send_data(em); + if (socket_apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_socket_send_apps_data(em, apps_groups_root_target); - if (socket_apps_enabled) - ebpf_socket_send_apps_data(em, apps_groups_root_target); + if (cgroups) + ebpf_socket_send_cgroup_data(update_every); - if (cgroups) - ebpf_socket_send_cgroup_data(update_every); + fflush(stdout); + if (network_connection) { + // We are calling fflush many times, because when we have a lot of dimensions + // we began to have not expected outputs and Netdata closed the plugin. + pthread_mutex_lock(&nv_mutex); + ebpf_socket_create_nv_charts(&inbound_vectors, update_every); fflush(stdout); + ebpf_socket_send_nv_data(&inbound_vectors); - if (network_connection) { - // We are calling fflush many times, because when we have a lot of dimensions - // we began to have not expected outputs and Netdata closed the plugin. 
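The rewritten loop above drops the condition-variable pacing (pthread_cond_wait plus a counter that fired once every update_every iterations) in favor of a heartbeat that sleeps one full update interval and re-checks the global ebpf_exit_plugin flag on both sides of the sleep, so a shutdown request is honored without waiting out another collection cycle. A minimal standalone sketch of that pacing pattern, with nanosleep standing in for Netdata's drift-compensating heartbeat_next() and a hypothetical signal-driven exit flag:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for the plugin-wide ebpf_exit_plugin flag. */
    static volatile sig_atomic_t exit_plugin = 0;

    static void on_sigterm(int sig) { (void)sig; exit_plugin = 1; }

    /* Simplified heartbeat: Netdata's heartbeat_next() also compensates
     * for scheduling drift, which this sketch does not attempt. */
    static void heartbeat_sleep(time_t seconds)
    {
        struct timespec ts = { .tv_sec = seconds, .tv_nsec = 0 };
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        signal(SIGTERM, on_sigterm);

        time_t update_every = 1;      /* seconds between iterations */
        while (!exit_plugin) {
            heartbeat_sleep(update_every);
            if (exit_plugin)          /* re-check after the sleep so a stop
                                         request is not delayed one cycle */
                break;
            puts("collect and send one round of metrics");
        }
        return 0;
    }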
- pthread_mutex_lock(&nv_mutex); - ebpf_socket_create_nv_charts(&inbound_vectors, update_every); - fflush(stdout); - ebpf_socket_send_nv_data(&inbound_vectors); - - ebpf_socket_create_nv_charts(&outbound_vectors, update_every); - fflush(stdout); - ebpf_socket_send_nv_data(&outbound_vectors); - wait_to_plot = 0; - pthread_mutex_unlock(&nv_mutex); + ebpf_socket_create_nv_charts(&outbound_vectors, update_every); + fflush(stdout); + ebpf_socket_send_nv_data(&outbound_vectors); + wait_to_plot = 0; + pthread_mutex_unlock(&nv_mutex); - } - pthread_mutex_unlock(&lock); } - + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } /***************************************************************** * - * FUNCTIONS TO CLOSE THE THREAD - * - *****************************************************************/ - - -/** - * Clean internal socket plot - * - * Clean all structures allocated with strdupz. - * - * @param ptr the pointer with addresses to clean. - */ -static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr) -{ - freez(ptr->dimension_recv); - freez(ptr->dimension_sent); - freez(ptr->resolved_name); - freez(ptr->dimension_retransmit); -} - -/** - * Clean socket plot - * - * Clean the allocated data for inbound and outbound vectors. - */ -static void clean_allocated_socket_plot() -{ - uint32_t i; - uint32_t end = inbound_vectors.last; - netdata_socket_plot_t *plot = inbound_vectors.plot; - for (i = 0; i < end; i++) { - clean_internal_socket_plot(&plot[i]); - } - - clean_internal_socket_plot(&plot[inbound_vectors.last]); - - end = outbound_vectors.last; - plot = outbound_vectors.plot; - for (i = 0; i < end; i++) { - clean_internal_socket_plot(&plot[i]); - } - clean_internal_socket_plot(&plot[outbound_vectors.last]); -} - -/** - * Clean network ports allocated during initialization. - * - * @param ptr a pointer to the link list. - */ -static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr) -{ - if (unlikely(!ptr)) - return; - - while (ptr) { - ebpf_network_viewer_port_list_t *next = ptr->next; - freez(ptr->value); - freez(ptr); - ptr = next; - } -} - -/** - * Clean service names - * - * Clean the allocated link list that stores names. - * - * @param names the link list. - */ -static void clean_service_names(ebpf_network_viewer_dim_name_t *names) -{ - if (unlikely(!names)) - return; - - while (names) { - ebpf_network_viewer_dim_name_t *next = names->next; - freez(names->name); - freez(names); - names = next; - } -} - -/** - * Clean hostnames - * - * @param hostnames the hostnames to clean - */ -static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames) -{ - if (unlikely(!hostnames)) - return; - - while (hostnames) { - ebpf_network_viewer_hostname_list_t *next = hostnames->next; - freez(hostnames->value); - simple_pattern_free(hostnames->value_pattern); - freez(hostnames); - hostnames = next; - } -} - -void clean_socket_apps_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(socket_bandwidth_curr[pids->pid]); - - pids = pids->next; - } -} - -/** - * Cleanup publish syscall - * - * @param nps list of structures to clean - */ -void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps) -{ - while (nps) { - freez(nps->algorithm); - nps = nps->next; - } -} - -/** - * Clean port Structure - * - * Clean the allocated list. 
- * - * @param clean the list that will be cleaned - */ -void clean_port_structure(ebpf_network_viewer_port_list_t **clean) -{ - ebpf_network_viewer_port_list_t *move = *clean; - while (move) { - ebpf_network_viewer_port_list_t *next = move->next; - freez(move->value); - freez(move); - - move = next; - } - *clean = NULL; -} - -/** - * Clean IP structure - * - * Clean the allocated list. - * - * @param clean the list that will be cleaned - */ -static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean) -{ - ebpf_network_viewer_ip_list_t *move = *clean; - while (move) { - ebpf_network_viewer_ip_list_t *next = move->next; - freez(move->value); - freez(move); - - move = next; - } - *clean = NULL; -} - -/** - * Clean up the main thread. - * - * @param ptr thread data. - */ -static void ebpf_socket_cleanup(void *ptr) -{ - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) - return; - - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2*USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - - ebpf_cleanup_publish_syscall(socket_publish_aggregated); - freez(socket_hash_values); - - freez(bandwidth_vector); - - freez(socket_values); - clean_allocated_socket_plot(); - freez(inbound_vectors.plot); - freez(outbound_vectors.plot); - - clean_port_structure(&listen_ports); - - ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0; - - clean_network_ports(network_viewer_opt.included_port); - clean_network_ports(network_viewer_opt.excluded_port); - clean_service_names(network_viewer_opt.names); - clean_hostnames(network_viewer_opt.included_hostnames); - clean_hostnames(network_viewer_opt.excluded_hostnames); - - pthread_mutex_destroy(&nv_mutex); - - freez(socket_threads.thread); - - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } - finalized_threads = 1; -} - -/***************************************************************** - * * FUNCTIONS TO START THREAD * *****************************************************************/ @@ -3891,8 +3876,8 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em) int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { ret = -1; } } @@ -3924,7 +3909,7 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em) */ void *ebpf_socket_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_socket_exit, ptr); memset(&inbound_vectors.tree, 0, sizeof(avl_tree_lock)); memset(&outbound_vectors.tree, 0, sizeof(avl_tree_lock)); @@ -3946,7 +3931,6 @@ void *ebpf_socket_thread(void *ptr) error("Cannot initialize local mutex"); goto endsocket; } - pthread_mutex_lock(&lock); ebpf_socket_allocate_global_vectors(em->apps_charts); initialize_inbound_outbound(); @@ -3973,11 +3957,11 @@ void *ebpf_socket_thread(void *ptr) socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names, algorithms, NETDATA_MAX_SOCKET_VECTOR); + pthread_mutex_lock(&lock); ebpf_create_global_charts(em); ebpf_update_stats(&plugin_statistics, em); - finalized_threads = 0; pthread_mutex_unlock(&lock); socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em); diff --git 
a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h index 67200130..711225ac 100644 --- a/collectors/ebpf.plugin/ebpf_socket.h +++ b/collectors/ebpf.plugin/ebpf_socket.h @@ -279,7 +279,7 @@ typedef struct netdata_socket { uint32_t retransmit; // It is never used with UDP uint16_t protocol; uint16_t reserved; -} netdata_socket_t __attribute__((__aligned__(8))); +} netdata_socket_t; typedef struct netdata_plot_values { // Values used in the previous iteration @@ -307,7 +307,7 @@ typedef struct netdata_socket_idx { uint16_t sport; union netdata_ip_t daddr; uint16_t dport; -} netdata_socket_idx_t __attribute__((__aligned__(8))); +} netdata_socket_idx_t; // Next values were defined according getnameinfo(3) #define NETDATA_MAX_NETWORK_COMBINED_LENGTH 1018 @@ -362,7 +362,6 @@ extern void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_ extern void parse_network_viewer_section(struct config *cfg); extern void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table); extern void parse_service_name_section(struct config *cfg); -extern void clean_socket_apps_structures(); extern ebpf_socket_publish_apps_t **socket_bandwidth_curr; extern struct config socket_config; diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c index f5e79279..ed13f027 100644 --- a/collectors/ebpf.plugin/ebpf_softirq.c +++ b/collectors/ebpf.plugin/ebpf_softirq.c @@ -54,51 +54,51 @@ static softirq_val_t softirq_vals[] = { // tmp store for soft IRQ values we get from a per-CPU eBPF map. static softirq_ebpf_val_t *softirq_ebpf_vals = NULL; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - -static int read_thread_closed = 1; - static struct netdata_static_thread softirq_threads = {"SOFTIRQ KERNEL", NULL, NULL, 1, NULL, NULL, NULL }; +static enum ebpf_threads_status ebpf_softirq_exited = NETDATA_THREAD_EBPF_RUNNING; /** - * Clean up the main thread. + * Exit + * + * Cancel thread. * * @param ptr thread data. */ -static void softirq_cleanup(void *ptr) +static void softirq_exit(void *ptr) { - for (int i = 0; softirq_tracepoints[i].class != NULL; i++) { - ebpf_disable_tracepoint(&softirq_tracepoints[i]); - } - ebpf_module_t *em = (ebpf_module_t *)ptr; if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; return; } - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 1 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } + ebpf_softirq_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Cleanup + * + * Clean up allocated memory. + * + * @param ptr thread data. 
+ */ +static void softirq_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_softirq_exited != NETDATA_THREAD_EBPF_STOPPED) + return; - freez(softirq_ebpf_vals); freez(softirq_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); + for (int i = 0; softirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&softirq_tracepoints[i]); } + freez(softirq_ebpf_vals); + + softirq_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -131,22 +131,24 @@ static void softirq_read_latency_map() */ static void *softirq_reader(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(softirq_exit, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SOFTIRQ_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_softirq_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); UNUSED(dt); + if (ebpf_softirq_exited == NETDATA_THREAD_EBPF_STOPPING) + break; softirq_read_latency_map(); } + ebpf_softirq_exited = NETDATA_THREAD_EBPF_STOPPED; - read_thread_closed = 1; + netdata_thread_cleanup_pop(1); return NULL; } @@ -200,7 +202,7 @@ static void softirq_collector(ebpf_module_t *em) netdata_thread_create( softirq_threads.thread, softirq_threads.name, - NETDATA_THREAD_OPTION_JOINABLE, + NETDATA_THREAD_OPTION_DEFAULT, softirq_reader, em ); @@ -213,24 +215,23 @@ static void softirq_collector(ebpf_module_t *em) pthread_mutex_unlock(&lock); // loop and read from published data until ebpf plugin is closed. - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); - - // write dims now for all hitherto discovered IRQs. - write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency"); - softirq_write_dims(); - write_end_chart(); - - pthread_mutex_unlock(&lock); - } - pthread_mutex_unlock(&collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + //This will be cancelled by its parent + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + + pthread_mutex_lock(&lock); + + // write dims now for all hitherto discovered IRQs. 
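The write_begin_chart/softirq_write_dims/write_end_chart calls that follow wrap Netdata's plugins.d text protocol: each refresh prints one BEGIN/SET.../END block per chart on stdout, which the agent parses line by line. A rough sketch of what one such block looks like, assuming an illustrative chart id of system.softirq_latency and made-up dimension names:

    #include <stdio.h>

    /* Emit one collection round for a latency chart; the names here are
     * illustrative, not the exact ids the collector registers. */
    static void write_latency_chart(const char *dims[], const long long vals[], int n)
    {
        printf("BEGIN system.softirq_latency\n");
        for (int i = 0; i < n; i++)
            printf("SET %s = %lld\n", dims[i], vals[i]);
        printf("END\n");
        fflush(stdout);   /* keep the pipe to the agent responsive */
    }

    int main(void)
    {
        const char *dims[] = { "timer", "net_rx" };
        const long long vals[] = { 12, 34 };
        write_latency_chart(dims, vals, 2);
        return 0;
    }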
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency"); + softirq_write_dims(); + write_end_chart(); + + pthread_mutex_unlock(&lock); } } @@ -260,8 +261,8 @@ void *ebpf_softirq_thread(void *ptr) goto endsoftirq; } - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endsoftirq; } diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c index 7d842335..71d0c402 100644 --- a/collectors/ebpf.plugin/ebpf_swap.c +++ b/collectors/ebpf.plugin/ebpf_swap.c @@ -7,7 +7,6 @@ static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" }; static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END]; static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END]; -static int read_thread_closed = 1; netdata_publish_swap_t *swap_vector = NULL; static netdata_idx_t swap_hash_values[NETDATA_SWAP_END]; @@ -35,11 +34,9 @@ static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = NULL, .internal_input = 0, .user_input = 0}}; -static struct bpf_link **probe_links = NULL; -static struct bpf_object *objects = NULL; - struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; +static enum ebpf_threads_status ebpf_swap_exited = NETDATA_THREAD_EBPF_RUNNING; netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE}, {.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE}, @@ -196,54 +193,48 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t *****************************************************************/ /** - * Clean swap structure + * Swap exit + * + * Cancel thread and exit. + * + * @param ptr thread data. */ -void clean_swap_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(swap_pid[pids->pid]); - - pids = pids->next; +static void ebpf_swap_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; } + + ebpf_swap_exited = NETDATA_THREAD_EBPF_STOPPING; } /** - * Clean up the main thread. + * Swap cleanup + * + * Clean up allocated memory. * * @param ptr thread data. 
*/ static void ebpf_swap_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_swap_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - ebpf_cleanup_publish_syscall(swap_publish_aggregated); freez(swap_vector); freez(swap_values); + freez(swap_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } #ifdef LIBBPF_MAJOR_VERSION - else if (bpf_obj) + if (bpf_obj) swap_bpf__destroy(bpf_obj); #endif + swap_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -404,21 +395,24 @@ static void read_global_table() */ void *ebpf_swap_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_swap_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_SWAP_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_swap_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_swap_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_swap_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -523,7 +517,7 @@ static int ebpf_send_systemd_swap_charts() for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -690,37 +684,38 @@ static void swap_collector(ebpf_module_t *em) swap_threads.thread = mallocz(sizeof(netdata_thread_t)); swap_threads.start_routine = ebpf_swap_read_hash; - netdata_thread_create(swap_threads.thread, swap_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(swap_threads.thread, swap_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_swap_read_hash, em); - int apps = em->apps_charts; int cgroup = em->cgroup_charts; int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (++counter == update_every) { - counter = 0; - if (apps) - read_apps_table(); + netdata_apps_integration_flags_t apps = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); + if (apps) + read_apps_table(); - if (cgroup) - ebpf_update_swap_cgroup(); + if (cgroup) + ebpf_update_swap_cgroup(); - pthread_mutex_lock(&lock); + pthread_mutex_lock(&lock); - swap_send_global(); + swap_send_global(); - if (apps) - ebpf_swap_send_apps_data(apps_groups_root_target); + if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_swap_send_apps_data(apps_groups_root_target); - if (cgroup) - ebpf_swap_send_cgroup_data(update_every); + if (cgroup) + ebpf_swap_send_cgroup_data(update_every); - pthread_mutex_unlock(&lock); - } + pthread_mutex_unlock(&lock); 
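The gating just above reflects that apps_charts has changed from a plain boolean into a netdata_apps_integration_flags_t bitmask across these collectors: per-PID tables are read whenever apps integration is on, but values are only sent once ebpf_*_create_apps_charts() has set NETDATA_EBPF_APPS_FLAG_CHART_CREATED, so no SET lines reach the agent before the chart definitions do. A small sketch of that gating, where the bit assignments are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t netdata_apps_integration_flags_t;

    /* Only the NO and CHART_CREATED names appear in the diff; the exact
     * values here are assumed for the sketch. */
    enum {
        NETDATA_EBPF_APPS_FLAG_NO            = 0,
        NETDATA_EBPF_APPS_FLAG_YES           = 1u << 0,
        NETDATA_EBPF_APPS_FLAG_CHART_CREATED = 1u << 1,
    };

    static void collect_round(netdata_apps_integration_flags_t apps)
    {
        if (apps)                                        /* integration on */
            puts("read per-PID hash tables");

        if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) /* charts exist   */
            puts("send apps dimensions");
    }

    int main(void)
    {
        collect_round(NETDATA_EBPF_APPS_FLAG_YES);       /* reads only     */
        collect_round(NETDATA_EBPF_APPS_FLAG_YES |
                      NETDATA_EBPF_APPS_FLAG_CHART_CREATED);
        return 0;
    }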
pthread_mutex_unlock(&collect_data_mutex); } } @@ -758,6 +753,7 @@ void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr) 20192, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /** @@ -817,8 +813,8 @@ static int ebpf_swap_load_bpf(ebpf_module_t *em) { int ret = 0; if (em->load == EBPF_LOAD_LEGACY) { - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { ret = -1; } } @@ -849,7 +845,7 @@ static int ebpf_swap_load_bpf(ebpf_module_t *em) */ void *ebpf_swap_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_swap_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_swap_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = swap_maps; diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h index 31bda16a..80c2c8e9 100644 --- a/collectors/ebpf.plugin/ebpf_swap.h +++ b/collectors/ebpf.plugin/ebpf_swap.h @@ -46,7 +46,6 @@ extern netdata_publish_swap_t **swap_pid; extern void *ebpf_swap_thread(void *ptr); extern void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void clean_swap_pid_structures(); extern struct config swap_config; extern netdata_ebpf_targets_t swap_targets[]; diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c index b45ec86c..0e56f541 100644 --- a/collectors/ebpf.plugin/ebpf_sync.c +++ b/collectors/ebpf.plugin/ebpf_sync.c @@ -8,8 +8,6 @@ static char *sync_counter_dimension_name[NETDATA_SYNC_IDX_END] = { "sync", "sync static netdata_syscall_stat_t sync_counter_aggregated_data[NETDATA_SYNC_IDX_END]; static netdata_publish_syscall_t sync_counter_publish_aggregated[NETDATA_SYNC_IDX_END]; -static int read_thread_closed = 1; - static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END]; struct netdata_static_thread sync_threads = {"SYNC KERNEL", NULL, NULL, 1, @@ -43,44 +41,6 @@ struct config sync_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -ebpf_sync_syscalls_t local_syscalls[] = { - {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - }, - {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL, -#ifdef LIBBPF_MAJOR_VERSION - .sync_obj = NULL -#endif - } -}; - netdata_ebpf_targets_t sync_targets[] = { 
{.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NETDATA_SYSCALLS_MSYNC, .mode = EBPF_LOAD_TRAMPOLINE}, @@ -88,6 +48,7 @@ netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode {.name = NETDATA_SYSCALLS_FDATASYNC, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .mode = EBPF_LOAD_TRAMPOLINE}, {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}}; +static enum ebpf_threads_status ebpf_sync_exited = NETDATA_THREAD_EBPF_RUNNING; #ifdef LIBBPF_MAJOR_VERSION @@ -218,6 +179,76 @@ static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t /***************************************************************** * + * CLEANUP THREAD + * + *****************************************************************/ + +/** + * Cleanup Objects + * + * Cleanup loaded objects when thread was initialized. + */ +void ebpf_sync_cleanup_objects() +{ + int i; + for (i = 0; local_syscalls[i].syscall; i++) { + ebpf_sync_syscalls_t *w = &local_syscalls[i]; + if (w->probe_links) { + struct bpf_program *prog; + size_t j = 0 ; + bpf_object__for_each_program(prog, w->objects) { + bpf_link__destroy(w->probe_links[j]); + j++; + } + freez(w->probe_links); + if (w->objects) + bpf_object__close(w->objects); + } +#ifdef LIBBPF_MAJOR_VERSION + else if (w->sync_obj) + sync_bpf__destroy(w->sync_obj); +#endif + } +} + +/** + * Exit + * + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void ebpf_sync_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; + } + + ebpf_sync_exited = NETDATA_THREAD_EBPF_STOPPING; +} + +/** + * Clean up the main thread. + * + * @param ptr thread data. 
+ */ +static void ebpf_sync_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (ebpf_sync_exited != NETDATA_THREAD_EBPF_STOPPED) + return; + + ebpf_sync_cleanup_objects(); + freez(sync_threads.thread); + + sync_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +/***************************************************************** + * * INITIALIZE THREAD * *****************************************************************/ @@ -334,21 +365,25 @@ static void read_global_table() */ void *ebpf_sync_read_hash(void *ptr) { + netdata_thread_cleanup_push(ebpf_sync_cleanup, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; - read_thread_closed = 0; heartbeat_t hb; heartbeat_init(&hb); usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + while (ebpf_sync_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_sync_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_sync_exited = NETDATA_THREAD_EBPF_STOPPED; + + netdata_thread_cleanup_pop(1); return NULL; } @@ -414,82 +449,25 @@ static void sync_collector(ebpf_module_t *em) sync_threads.thread = mallocz(sizeof(netdata_thread_t)); sync_threads.start_routine = ebpf_sync_read_hash; - netdata_thread_create(sync_threads.thread, sync_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(sync_threads.thread, sync_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_sync_read_hash, em); - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - - if (++counter == update_every) { - counter = 0; - pthread_mutex_lock(&lock); - - sync_send_data(); - - pthread_mutex_unlock(&lock); - } - pthread_mutex_unlock(&collect_data_mutex); - } -} + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; + pthread_mutex_lock(&lock); -/***************************************************************** - * - * CLEANUP THREAD - * - *****************************************************************/ + sync_send_data(); -/** - * Cleanup Objects - * - * Cleanup loaded objects when thread was initialized. - */ -void ebpf_sync_cleanup_objects() -{ - int i; - for (i = 0; local_syscalls[i].syscall; i++) { - ebpf_sync_syscalls_t *w = &local_syscalls[i]; - if (w->probe_links) { - struct bpf_program *prog; - size_t j = 0 ; - bpf_object__for_each_program(prog, w->objects) { - bpf_link__destroy(w->probe_links[j]); - j++; - } - bpf_object__close(w->objects); - } -#ifdef LIBBPF_MAJOR_VERSION - else if (w->sync_obj) - sync_bpf__destroy(w->sync_obj); -#endif + pthread_mutex_unlock(&lock); } } -/** - * Clean up the main thread. - * - * @param ptr thread data. 
- */ -static void ebpf_sync_cleanup(void *ptr) -{ - ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) - return; - - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 2*USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - - ebpf_sync_cleanup_objects(); - freez(sync_threads.thread); -} /***************************************************************** * @@ -587,7 +565,7 @@ static void ebpf_sync_parse_syscalls() */ void *ebpf_sync_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_sync_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_sync_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = sync_maps; diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h index 2bc18c54..a52434c1 100644 --- a/collectors/ebpf.plugin/ebpf_sync.h +++ b/collectors/ebpf.plugin/ebpf_sync.h @@ -41,23 +41,6 @@ typedef enum sync_syscalls_index { NETDATA_SYNC_IDX_END } sync_syscalls_index_t; -typedef struct ebpf_sync_syscalls { - char *syscall; - int enabled; - uint32_t flags; - - // BTF structure - struct bpf_object *objects; - struct bpf_link **probe_links; - - // BPF structure -#ifdef LIBBPF_MAJOR_VERSION - struct sync_bpf *sync_obj; -#else - void *sync_obj; -#endif -} ebpf_sync_syscalls_t; - enum netdata_sync_charts { NETDATA_SYNC_CALL, diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c index e14165fb..6746cc62 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.c +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -34,33 +34,32 @@ struct config vfs_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; -static struct bpf_object *objects = NULL; -static struct bpf_link **probe_links = NULL; - struct netdata_static_thread vfs_threads = {"VFS KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; - -static int read_thread_closed = 1; +static enum ebpf_threads_status ebpf_vfs_exited = NETDATA_THREAD_EBPF_RUNNING; /***************************************************************** * * FUNCTIONS TO CLOSE THE THREAD * *****************************************************************/ - /** - * Clean PID structures + * Exit * - * Clean the allocated structures. - */ -void clean_vfs_pid_structures() { - struct pid_stat *pids = root_of_pids; - while (pids) { - freez(vfs_pid[pids->pid]); - - pids = pids->next; + * Cancel thread and exit. + * + * @param ptr thread data. 
+**/ +static void ebpf_vfs_exit(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + em->enabled = NETDATA_MAIN_THREAD_EXITED; + return; } + + ebpf_vfs_exited = NETDATA_THREAD_EBPF_STOPPING; } /** @@ -71,29 +70,15 @@ void clean_vfs_pid_structures() { static void ebpf_vfs_cleanup(void *ptr) { ebpf_module_t *em = (ebpf_module_t *)ptr; - if (!em->enabled) + if (ebpf_vfs_exited != NETDATA_THREAD_EBPF_STOPPED) return; - heartbeat_t hb; - heartbeat_init(&hb); - uint32_t tick = 50 * USEC_PER_MS; - while (!read_thread_closed) { - usec_t dt = heartbeat_next(&hb, tick); - UNUSED(dt); - } - freez(vfs_hash_values); freez(vfs_vector); + freez(vfs_threads.thread); - if (probe_links) { - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; - } - bpf_object__close(objects); - } + vfs_threads.enabled = NETDATA_MAIN_THREAD_EXITED; + em->enabled = NETDATA_MAIN_THREAD_EXITED; } /***************************************************************** @@ -526,23 +511,26 @@ static void read_update_vfs_cgroup() */ void *ebpf_vfs_read_hash(void *ptr) { - read_thread_closed = 0; - + netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr); heartbeat_t hb; heartbeat_init(&hb); ebpf_module_t *em = (ebpf_module_t *)ptr; usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_every; - while (!close_ebpf_plugin) { + //This will be cancelled by its parent + while (ebpf_vfs_exited == NETDATA_THREAD_EBPF_RUNNING) { usec_t dt = heartbeat_next(&hb, step); (void)dt; + if (ebpf_vfs_exited == NETDATA_THREAD_EBPF_STOPPING) + break; read_global_table(); } - read_thread_closed = 1; + ebpf_vfs_exited = NETDATA_THREAD_EBPF_STOPPED; + netdata_thread_cleanup_pop(1); return NULL; } @@ -1000,7 +988,7 @@ static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em) for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { if (unlikely(ect->systemd) && unlikely(ect->updated)) { write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call); - } else + } else if (unlikely(ect->systemd)) ret = 0; } write_end_chart(); @@ -1181,38 +1169,38 @@ static void vfs_collector(ebpf_module_t *em) vfs_threads.thread = mallocz(sizeof(netdata_thread_t)); vfs_threads.start_routine = ebpf_vfs_read_hash; - netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_DEFAULT, ebpf_vfs_read_hash, em); - int apps = em->apps_charts; int cgroups = em->cgroup_charts; - int update_every = em->update_every; - int counter = update_every - 1; - while (!close_ebpf_plugin) { - pthread_mutex_lock(&collect_data_mutex); - pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = em->update_every * USEC_PER_SEC; + while (!ebpf_exit_plugin) { + (void)heartbeat_next(&hb, step); + if (ebpf_exit_plugin) + break; - if (++counter == update_every) { - counter = 0; - if (apps) - ebpf_vfs_read_apps(); + netdata_apps_integration_flags_t apps = em->apps_charts; + pthread_mutex_lock(&collect_data_mutex); + if (apps) + ebpf_vfs_read_apps(); - if (cgroups) - read_update_vfs_cgroup(); + if (cgroups) + read_update_vfs_cgroup(); - pthread_mutex_lock(&lock); + pthread_mutex_lock(&lock); - ebpf_vfs_send_data(em); - fflush(stdout); + ebpf_vfs_send_data(em); + fflush(stdout); - if (apps) - ebpf_vfs_send_apps_data(em, apps_groups_root_target); + if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) + ebpf_vfs_send_apps_data(em, 
apps_groups_root_target); - if (cgroups) - ebpf_vfs_send_cgroup_data(em); + if (cgroups) + ebpf_vfs_send_cgroup_data(em); - pthread_mutex_unlock(&lock); - } + pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -1520,6 +1508,8 @@ void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr) ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); } + + em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED; } /***************************************************************** @@ -1564,7 +1554,7 @@ static void ebpf_vfs_allocate_global_vectors(int apps) */ void *ebpf_vfs_thread(void *ptr) { - netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr); + netdata_thread_cleanup_push(ebpf_vfs_exit, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = vfs_maps; @@ -1576,8 +1566,8 @@ void *ebpf_vfs_thread(void *ptr) if (!em->enabled) goto endvfs; - probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &objects); - if (!probe_links) { + em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects); + if (!em->probe_links) { em->enabled = CONFIG_BOOLEAN_NO; goto endvfs; } diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h index 67542ad4..87a21e39 100644 --- a/collectors/ebpf.plugin/ebpf_vfs.h +++ b/collectors/ebpf.plugin/ebpf_vfs.h @@ -156,7 +156,6 @@ extern netdata_publish_vfs_t **vfs_pid; extern void *ebpf_vfs_thread(void *ptr); extern void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr); -extern void clean_vfs_pid_structures(); extern struct config vfs_config; |
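Taken together, every collector in this diff converges on the same two-phase shutdown: netdata_thread_cleanup_push() on the main thread now registers a small *_exit handler that merely flips a per-module ebpf_threads_status from RUNNING to STOPPING, while the memory-freeing *_cleanup handler moved onto the reader thread, which marks the status STOPPED before its cleanup runs, so buffers are never freed while the reader might still touch them. The per-file probe_links/objects globals likewise moved into ebpf_module_t (em->probe_links, em->objects), letting teardown be driven from the module table. A compilable sketch of the state machine on plain pthreads, with the synchronization deliberately simplified (the flag is merely volatile here; names other than the enum are hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Mirrors the enum ebpf_threads_status used across the diff. */
    enum ebpf_threads_status {
        NETDATA_THREAD_EBPF_RUNNING,
        NETDATA_THREAD_EBPF_STOPPING,
        NETDATA_THREAD_EBPF_STOPPED,
    };

    static volatile enum ebpf_threads_status status = NETDATA_THREAD_EBPF_RUNNING;

    /* Main-thread cancellation handler: request a stop, free nothing. */
    static void module_exit(void *ptr)
    {
        (void)ptr;
        status = NETDATA_THREAD_EBPF_STOPPING;
    }

    /* Reader-thread cancellation handler: frees resources, but only after
     * the reader loop has acknowledged the stop. */
    static void module_cleanup(void *ptr)
    {
        (void)ptr;
        if (status != NETDATA_THREAD_EBPF_STOPPED)
            return;
        puts("free vectors, destroy BPF links and objects");
    }

    static void *reader(void *ptr)
    {
        pthread_cleanup_push(module_cleanup, ptr);
        while (status == NETDATA_THREAD_EBPF_RUNNING) {
            usleep(100 * 1000);                   /* heartbeat step        */
            if (status == NETDATA_THREAD_EBPF_STOPPING)
                break;
            /* read eBPF hash tables here */
        }
        status = NETDATA_THREAD_EBPF_STOPPED;     /* acknowledge the stop  */
        pthread_cleanup_pop(1);                   /* runs module_cleanup   */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, reader, NULL);
        sleep(1);
        module_exit(NULL);   /* in the plugin this runs via cleanup_push */
        pthread_join(t, NULL);
        return 0;
    }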