author     Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-08 16:27:04 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-08 16:27:04 +0000
commit     a836a244a3d2bdd4da1ee2641e3e957850668cea (patch)
tree       cb87c75b3677fab7144f868435243f864048a1e6 /collectors/ebpf.plugin/ebpf.c
parent     Adding upstream version 1.38.1. (diff)
Adding upstream version 1.39.0. (upstream/1.39.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  collectors/ebpf.plugin/ebpf.c | 430
1 file changed, 357 insertions(+), 73 deletions(-)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 67fe477c..c0764c60 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -28,11 +28,22 @@ int running_on_kernel = 0;
int ebpf_nprocs;
int isrh = 0;
int main_thread_id = 0;
+int process_pid_fd = -1;
pthread_mutex_t lock;
pthread_mutex_t ebpf_exit_cleanup;
pthread_mutex_t collect_data_mutex;
-pthread_cond_t collect_data_cond_var;
+
+struct netdata_static_thread cgroup_integration_thread = {
+ .name = "EBPF CGROUP INT",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
ebpf_module_t ebpf_modules[] = {
{ .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
@@ -435,9 +446,6 @@ ebpf_sync_syscalls_t local_syscalls[] = {
};
-// Link with apps.plugin
-ebpf_process_stat_t *global_process_stat = NULL;
-
// Link with cgroup.plugin
netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
int shm_fd_ebpf_cgroup = -1;
@@ -449,10 +457,19 @@ ebpf_network_viewer_options_t network_viewer_opt;
// Statistic
ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0,
- .probes = 0, .retprobes = 0, .trampolines = 0};
+ .probes = 0, .retprobes = 0, .trampolines = 0, .memlock_kern = 0,
+ .hash_tables = 0};
#ifdef LIBBPF_MAJOR_VERSION
struct btf *default_btf = NULL;
+struct cachestat_bpf *cachestat_bpf_obj = NULL;
+struct dc_bpf *dc_bpf_obj = NULL;
+struct fd_bpf *fd_bpf_obj = NULL;
+struct mount_bpf *mount_bpf_obj = NULL;
+struct shm_bpf *shm_bpf_obj = NULL;
+struct socket_bpf *socket_bpf_obj = NULL;
+struct swap_bpf *bpf_obj = NULL;
+struct vfs_bpf *vfs_bpf_obj = NULL;
#else
void *default_btf = NULL;
#endif
@@ -460,6 +477,35 @@ char *btf_path = NULL;
/*****************************************************************
*
+ * FUNCTIONS USED TO ALLOCATE APPS/CGROUP MEMORIES (ARAL)
+ *
+ *****************************************************************/
+
+/**
+ * Allocate PID ARAL
+ *
+ * Allocate memory using ARAL functions to speed up processing.
+ *
+ * @param name the internal name used for the allocated region.
+ * @param size the size of each element inside the allocated space.
+ *
+ * @return It returns the address on success and NULL otherwise.
+ */
+ARAL *ebpf_allocate_pid_aral(char *name, size_t size)
+{
+ static size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
+ if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
+ error("Number of elements given is too small, adjusting it to %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
+ max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
+ }
+
+ return aral_create(name, size,
+ 0, max_elements,
+ NULL, NULL, NULL, false, false);
+}
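
A minimal usage sketch for the new allocator; aral_mallocz()/aral_freez() are assumed from libnetdata's ARAL interface, and the pool name is illustrative:

    /* One pool per kind of per-PID structure, created once at thread start-up. */
    ARAL *pid_pool = ebpf_allocate_pid_aral("ebpf-example-pid", sizeof(ebpf_process_stat_t));

    ebpf_process_stat_t *ps = aral_mallocz(pid_pool);  /* take an element from the region */
    memset(ps, 0, sizeof(*ps));
    /* ... update ps while the PID is alive ... */
    aral_freez(pid_pool, ps);                          /* hand the element back to the pool */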
+
+/*****************************************************************
+ *
* FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
*
*****************************************************************/
@@ -488,10 +534,12 @@ static void ebpf_exit()
#endif
printf("DISABLE\n");
+ pthread_mutex_lock(&mutex_cgroup_shm);
if (shm_ebpf_cgroup.header) {
- munmap(shm_ebpf_cgroup.header, shm_ebpf_cgroup.header->body_length);
+ ebpf_unmap_cgroup_shared_memory();
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
}
+ pthread_mutex_unlock(&mutex_cgroup_shm);
exit(0);
}
@@ -518,6 +566,126 @@ static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link
bpf_object__close(objects);
}
+/**
+ * Unload unique maps
+ *
+ * This function unloads all BPF maps from threads that use one unique BPF object.
+ */
+static void ebpf_unload_unique_maps()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
+ if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
+ error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name);
+
+ continue;
+ }
+
+ ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
+ switch (i) {
+ case EBPF_MODULE_CACHESTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (cachestat_bpf_obj)
+ cachestat_bpf__destroy(cachestat_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_DCSTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (dc_bpf_obj)
+ dc_bpf__destroy(dc_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_FD_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (fd_bpf_obj)
+ fd_bpf__destroy(fd_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_MOUNT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (mount_bpf_obj)
+ mount_bpf__destroy(mount_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SHM_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (shm_bpf_obj)
+ shm_bpf__destroy(shm_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SOCKET_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (socket_bpf_obj)
+ socket_bpf__destroy(socket_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SWAP_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (bpf_obj)
+ swap_bpf__destroy(bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_VFS_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (vfs_bpf_obj)
+ vfs_bpf__destroy(vfs_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_PROCESS_IDX:
+ case EBPF_MODULE_DISK_IDX:
+ case EBPF_MODULE_HARDIRQ_IDX:
+ case EBPF_MODULE_SOFTIRQ_IDX:
+ case EBPF_MODULE_OOMKILL_IDX:
+ case EBPF_MODULE_MDFLUSH_IDX:
+ default:
+ continue;
+ }
+ }
+}
+
+/**
+ * Unload filesystem maps
+ *
+ * This function unloads all BPF maps from the filesystem thread.
+ */
+static void ebpf_unload_filesystems()
+{
+ if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+ ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+ return;
+
+ int i;
+ for (i = 0; localfs[i].filesystem != NULL; i++) {
+ ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
+ }
+}
+
+/**
+ * Unload sync maps
+ *
+ * This function unloads all BPF maps from the sync thread.
+ */
+static void ebpf_unload_sync()
+{
+ if (ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+ ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+ return;
+
+ int i;
+ for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+ ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+ }
+}
+
int ebpf_exit_plugin = 0;
/**
* Close the collector gracefully
@@ -529,7 +697,6 @@ static void ebpf_stop_threads(int sig)
UNUSED(sig);
static int only_one = 0;
- int i;
// Child thread should be closed by itself.
pthread_mutex_lock(&ebpf_exit_cleanup);
if (main_thread_id != gettid() || only_one) {
@@ -537,13 +704,26 @@ static void ebpf_stop_threads(int sig)
return;
}
only_one = 1;
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- if (ebpf_threads[i].enabled != NETDATA_THREAD_EBPF_STOPPED)
- netdata_thread_cancel(*ebpf_threads[i].thread);
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name != NULL; i++) {
+ if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING) {
+ netdata_thread_cancel(*ebpf_modules[i].thread->thread);
+#ifdef NETDATA_DEV_MODE
+ info("Sending cancel for thread %s", ebpf_modules[i].thread_name);
+#endif
+ }
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ netdata_thread_cancel(*cgroup_integration_thread.thread);
+#ifdef NETDATA_DEV_MODE
+ info("Sending cancel for thread %s", cgroup_integration_thread.name);
+#endif
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+
ebpf_exit_plugin = 1;
+
usec_t max = USEC_PER_SEC, step = 100000;
while (i && max) {
max -= step;
@@ -551,42 +731,18 @@ static void ebpf_stop_threads(int sig)
i = 0;
int j;
pthread_mutex_lock(&ebpf_exit_cleanup);
- for (j = 0; ebpf_threads[j].name != NULL; j++) {
- if (ebpf_threads[j].enabled != NETDATA_THREAD_EBPF_STOPPED)
+ for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
+ if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
i++;
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
- if (!i) {
- //Unload threads(except sync and filesystem)
- pthread_mutex_lock(&ebpf_exit_cleanup);
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
- i != EBPF_MODULE_SYNC_IDX)
- ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- //Unload filesystem
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; localfs[i].filesystem != NULL; i++) {
- ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- //Unload Sync
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; local_syscalls[i].syscall != NULL; i++) {
- ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- }
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ ebpf_unload_unique_maps();
+ ebpf_unload_filesystems();
+ ebpf_unload_sync();
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_exit();
}
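
For reference, the wait loop above gives the module threads at most USEC_PER_SEC / 100000 = 10 polling rounds, roughly one second in 100 ms steps (assuming the elided loop body sleeps for step between checks), to leave the NETDATA_THREAD_EBPF_RUNNING state before the maps are unloaded.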
@@ -598,6 +754,58 @@ static void ebpf_stop_threads(int sig)
*****************************************************************/
/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on the apps submenu.
+ *
+ * @param root a pointer to the targets.
+ */
+static void ebpf_create_apps_charts(struct ebpf_target *root)
+{
+ if (unlikely(!ebpf_all_pids))
+ return;
+
+ struct ebpf_target *w;
+ int newly_added = 0;
+
+ for (w = root; w; w = w->next) {
+ if (w->target)
+ continue;
+
+ if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+ struct ebpf_pid_on_target *pid_on_target;
+
+ fprintf(
+ stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
+ (w->processes == 1) ? "" : "es");
+
+ for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid);
+ }
+
+ fputc('\n', stderr);
+ }
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ if (!newly_added)
+ return;
+
+ int counter;
+ for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
+ ebpf_module_t *current = &ebpf_modules[counter];
+ if (current->enabled == NETDATA_THREAD_EBPF_RUNNING && current->apps_charts && current->apps_routine)
+ current->apps_routine(current, root);
+ }
+}
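
A hedged sketch of the callback shape this loop expects; the signature is inferred from the call above, and the chart id, title, family, order and module name are purely illustrative:

    static void example_apps_routine(ebpf_module_t *em, struct ebpf_target *root)
    {
        /* Create the per-application chart for this module, using the helper
         * whose updated prototype appears later in this diff. */
        ebpf_create_charts_on_apps("app_example_calls",              /* hypothetical chart id */
                                   "Example calls made by applications.",
                                   "calls",
                                   "example",                        /* hypothetical family */
                                   NETDATA_EBPF_CHART_TYPE_STACKED,
                                   20200,                            /* hypothetical order */
                                   ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
                                   root,
                                   em->update_every,
                                   "example");                       /* hypothetical module name */
    }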
+
+/**
* Get a value from a structure.
*
* @param basis it is the first address of the structure
@@ -876,9 +1084,9 @@ void ebpf_create_chart(char *type,
* @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, struct target *root, int update_every, char *module)
+ char *algorithm, struct ebpf_target *root, int update_every, char *module)
{
- struct target *w;
+ struct ebpf_target *w;
ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order,
update_every, module);
@@ -913,6 +1121,79 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist,
fflush(stdout);
}
+/**
+ * ARAL Charts
+ *
+ * Add charts to monitor ARAL usage.
+ * Caller must call this function with the mutex locked.
+ *
+ * @param name the name used to create the ARAL region
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
+{
+ static int priority = 140100;
+ char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
+ char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
+
+ snprintfz(em->memory_usage, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_size", name);
+ snprintfz(em->memory_allocations, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_alloc", name);
+
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ em->memory_usage,
+ "Bytes allocated for ARAL.",
+ "bytes",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ "netdata.ebpf_aral_stat_size",
+ priority++,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(mem,
+ mem,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ em->memory_allocations,
+ "Calls to allocate memory.",
+ "calls",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ "netdata.ebpf_aral_stat_alloc",
+ priority++,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(aral,
+ aral,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
+ * Send data from ARAL chart
+ *
+ * Send ARAL usage data for the eBPF plugin.
+ *
+ * @param memory a pointer to the ARAL region being monitored
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
+{
+ char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
+ char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
+
+ struct aral_statistics *stats = aral_statistics(memory);
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage);
+ write_chart_dimension(mem, (long long)stats->structures.allocated_bytes);
+ write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations);
+ write_chart_dimension(aral, (long long)stats->structures.allocations);
+ write_end_chart();
+}
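
A sketch of how a module thread could combine the two helpers above, assuming it keeps its own ARAL region; the pool and chart names are illustrative, and the plugin's global lock is assumed to satisfy the "mutex locked" requirement noted for the create call:

    /* Once, at thread start-up. */
    ARAL *pool = ebpf_allocate_pid_aral("ebpf-example", sizeof(ebpf_process_stat_t));
    pthread_mutex_lock(&lock);
    ebpf_statistic_create_aral_chart("example", em);   /* defines the size and alloc charts */
    pthread_mutex_unlock(&lock);

    /* On every collection cycle, publish the current pool statistics. */
    pthread_mutex_lock(&lock);
    ebpf_send_data_aral_chart(pool, em);
    pthread_mutex_unlock(&lock);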
+
/*****************************************************************
*
* FUNCTIONS TO DEFINE OPTIONS
@@ -944,7 +1225,7 @@ void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *p
pio[i].dimension = dim[i];
pio[i].name = name[i];
- pio[i].algorithm = strdupz(ebpf_algorithms[algorithm[i]]);
+ pio[i].algorithm = ebpf_algorithms[algorithm[i]];
if (publish_prev) {
publish_prev->next = &pio[i];
}
@@ -1342,21 +1623,13 @@ static void read_local_addresses()
* Start Pthread Variable
*
* This function starts all pthread variables.
- *
- * @return It returns 0 on success and -1.
*/
-int ebpf_start_pthread_variables()
+void ebpf_start_pthread_variables()
{
pthread_mutex_init(&lock, NULL);
pthread_mutex_init(&ebpf_exit_cleanup, NULL);
pthread_mutex_init(&collect_data_mutex, NULL);
-
- if (pthread_cond_init(&collect_data_cond_var, NULL)) {
- error("Cannot start conditional variable to control Apps charts.");
- return -1;
- }
-
- return 0;
+ pthread_mutex_init(&mutex_cgroup_shm, NULL);
}
/**
@@ -1386,8 +1659,8 @@ static void ebpf_allocate_common_vectors()
return;
}
- all_pids = callocz((size_t)pid_max, sizeof(struct pid_stat *));
- global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t));
+ ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *));
+ ebpf_aral_init();
}
/**
@@ -1720,8 +1993,9 @@ void set_global_variables()
ebpf_configured_log_dir = LOG_DIR;
ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
- if (ebpf_nprocs > NETDATA_MAX_PROCESSOR) {
+ if (ebpf_nprocs < 0) {
ebpf_nprocs = NETDATA_MAX_PROCESSOR;
+ error("Cannot identify number of processors, using default value %d", ebpf_nprocs);
}
isrh = get_redhat_release();
@@ -2088,7 +2362,7 @@ static pid_t ebpf_read_previous_pid(char *filename)
length = 63;
buffer[length] = '\0';
- old_pid = (pid_t)str2uint32_t(buffer);
+ old_pid = (pid_t) str2uint32_t(buffer, NULL);
}
fclose(fp);
@@ -2219,10 +2493,7 @@ int main(int argc, char **argv)
signal(SIGTERM, ebpf_stop_threads);
signal(SIGPIPE, ebpf_stop_threads);
- if (ebpf_start_pthread_variables()) {
- error("Cannot start mutex to control overall charts.");
- ebpf_exit();
- }
+ ebpf_start_pthread_variables();
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
if(verify_netdata_host_prefix() == -1) ebpf_exit(6);
@@ -2241,6 +2512,12 @@ int main(int argc, char **argv)
ebpf_set_static_routine();
+ cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
+ cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
+
+ netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
+ NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
+
int i;
for (i = 0; ebpf_threads[i].name != NULL; i++) {
struct netdata_static_thread *st = &ebpf_threads[i];
@@ -2251,30 +2528,37 @@ int main(int argc, char **argv)
if (em->enabled || !i) {
st->thread = mallocz(sizeof(netdata_thread_t));
em->thread_id = i;
- st->enabled = NETDATA_THREAD_EBPF_RUNNING;
+ em->enabled = NETDATA_THREAD_EBPF_RUNNING;
netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
} else {
- st->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
}
}
usec_t step = USEC_PER_SEC;
- int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
heartbeat_t hb;
heartbeat_init(&hb);
+ int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
+ int update_apps_list = update_apps_every - 1;
//Plugin will be killed when it receives a signal
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, step);
- // We are using a small heartbeat time to wake up thread,
- // but we should not update so frequently the shared memory data
- if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
- counter = 0;
- if (!shm_ebpf_cgroup.header)
- ebpf_map_cgroup_shared_memory();
-
- ebpf_parse_cgroup_shm_data();
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING && process_pid_fd != -1) {
+ pthread_mutex_lock(&collect_data_mutex);
+ if (++update_apps_list == update_apps_every) {
+ update_apps_list = 0;
+ cleanup_exited_pids();
+ collect_data_for_all_processes(process_pid_fd);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_apps_charts(apps_groups_root_target);
+ pthread_mutex_unlock(&lock);
+ }
+ pthread_mutex_unlock(&collect_data_mutex);
}
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
ebpf_stop_threads(0);
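
Worked example of the new cadence: update_apps_list starts at update_apps_every - 1, so with step = USEC_PER_SEC the very first heartbeat (one second after start) already triggers cleanup_exited_pids() and collect_data_for_all_processes(), and each subsequent collection happens once every update_apps_every seconds, provided the guarded module is still RUNNING and process_pid_fd is valid.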