From c933bf105b0de89e3fd524517daf163a16dd0d44 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Tue, 27 Jun 2023 20:46:20 +0200
Subject: Merging upstream version 1.40.1.

Signed-off-by: Daniel Baumann
---
 collectors/ebpf.plugin/ebpf.c                      | 137 +++++++++------
 collectors/ebpf.plugin/ebpf_cachestat.c            |  34 ++---
 collectors/ebpf.plugin/ebpf_dcstat.c               |  46 ++-----
 collectors/ebpf.plugin/ebpf_disk.c                 |  26 ++--
 collectors/ebpf.plugin/ebpf_fd.c                   |  31 ++---
 collectors/ebpf.plugin/ebpf_hardirq.c              |  25 ++--
 collectors/ebpf.plugin/ebpf_mdflush.c              |  23 ++--
 collectors/ebpf.plugin/ebpf_mount.c                |  26 ++--
 collectors/ebpf.plugin/ebpf_oomkill.c              |   4 +
 collectors/ebpf.plugin/ebpf_shm.c                  |  30 ++---
 collectors/ebpf.plugin/ebpf_softirq.c              |  26 ++--
 collectors/ebpf.plugin/ebpf_swap.c                 |  29 ++---
 collectors/ebpf.plugin/ebpf_sync.c                 |  33 ++---
 collectors/ebpf.plugin/ebpf_vfs.c                  |  29 ++---
 collectors/proc.plugin/proc_net_dev.c              |  16 +++
 .../python.d.plugin/nvidia_smi/nvidia_smi.chart.py |  17 +--
 16 files changed, 212 insertions(+), 320 deletions(-)

(limited to 'collectors')

diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 45303574f..ffab37de3 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -539,6 +539,31 @@ ARAL *ebpf_allocate_pid_aral(char *name, size_t size)
  *
  *****************************************************************/
 
+/**
+ * Wait to avoid possible coredumps while process is closing.
+ */
+static inline void ebpf_check_before2go()
+{
+    int i = EBPF_OPTION_ALL_CHARTS;
+    usec_t max = USEC_PER_SEC, step = 200000;
+    while (i && max) {
+        max -= step;
+        sleep_usec(step);
+        i = 0;
+        int j;
+        pthread_mutex_lock(&ebpf_exit_cleanup);
+        for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
+            if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
+                i++;
+        }
+        pthread_mutex_unlock(&ebpf_exit_cleanup);
+    }
+
+    if (i) {
+        error("eBPF cannot unload all threads on time, but it will go away");
+    }
+}
+
 /**
  * Close the collector gracefully
  */
@@ -561,8 +586,10 @@ static void ebpf_exit()
 #ifdef NETDATA_INTERNAL_CHECKS
     error("Good bye world! I was PID %d", main_thread_id);
 #endif
-    printf("DISABLE\n");
+    fprintf(stdout, "EXIT\n");
+    fflush(stdout);
 
+    ebpf_check_before2go();
     pthread_mutex_lock(&mutex_cgroup_shm);
     if (shm_ebpf_cgroup.header) {
         ebpf_unmap_cgroup_shared_memory();
@@ -604,6 +631,10 @@ static void ebpf_unload_unique_maps()
 {
     int i;
     for (i = 0; ebpf_modules[i].thread_name; i++) {
+        // These threads are cleaned with other functions
+        if (i > EBPF_MODULE_SOCKET_IDX)
+            continue;
+
         if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
             if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
                 error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name);
@@ -611,73 +642,18 @@ static void ebpf_unload_unique_maps()
             continue;
         }
 
-        ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
-        switch (i) {
-            case EBPF_MODULE_CACHESTAT_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (cachestat_bpf_obj)
-                    cachestat_bpf__destroy(cachestat_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_DCSTAT_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (dc_bpf_obj)
-                    dc_bpf__destroy(dc_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_FD_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (fd_bpf_obj)
-                    fd_bpf__destroy(fd_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_MOUNT_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (mount_bpf_obj)
-                    mount_bpf__destroy(mount_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_SHM_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (shm_bpf_obj)
-                    shm_bpf__destroy(shm_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_SOCKET_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (socket_bpf_obj)
-                    socket_bpf__destroy(socket_bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_SWAP_IDX: {
-#ifdef LIBBPF_MAJOR_VERSION
-                if (bpf_obj)
-                    swap_bpf__destroy(bpf_obj);
-#endif
-                break;
-            }
-            case EBPF_MODULE_VFS_IDX: {
+        if (ebpf_modules[i].load == EBPF_LOAD_LEGACY) {
+            ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
+            continue;
+        }
+
+        if (i == EBPF_MODULE_SOCKET_IDX) {
 #ifdef LIBBPF_MAJOR_VERSION
-                if (vfs_bpf_obj)
-                    vfs_bpf__destroy(vfs_bpf_obj);
+            if (socket_bpf_obj)
+                socket_bpf__destroy(socket_bpf_obj);
 #endif
-                break;
-            }
-            case EBPF_MODULE_PROCESS_IDX:
-            case EBPF_MODULE_DISK_IDX:
-            case EBPF_MODULE_HARDIRQ_IDX:
-            case EBPF_MODULE_SOFTIRQ_IDX:
-            case EBPF_MODULE_OOMKILL_IDX:
-            case EBPF_MODULE_MDFLUSH_IDX:
-            default:
-                continue;
         }
+
     }
 }
 
@@ -689,11 +665,15 @@ static void ebpf_unload_unique_maps()
 static void ebpf_unload_filesystems()
 {
     if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
-        ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+        ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING ||
+        ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].load != EBPF_LOAD_LEGACY)
         return;
 
     int i;
     for (i = 0; localfs[i].filesystem != NULL; i++) {
+        if (!localfs[i].objects)
+            continue;
+
         ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
     }
 }
@@ -711,6 +691,15 @@ static void ebpf_unload_filesystems()
 
     int i;
     for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+        if (!local_syscalls[i].enabled)
+            continue;
+
+#ifdef LIBBPF_MAJOR_VERSION
+        if (local_syscalls[i].sync_obj) {
+            sync_bpf__destroy(local_syscalls[i].sync_obj);
+            continue;
+        }
+#endif
         ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
     }
 }
@@ -753,19 +742,7 @@ static void ebpf_stop_threads(int sig)
 
     ebpf_exit_plugin = 1;
 
-    usec_t max = USEC_PER_SEC, step = 100000;
-    while (i && max) {
-        max -= step;
-        sleep_usec(step);
-        i = 0;
-        int j;
-        pthread_mutex_lock(&ebpf_exit_cleanup);
-        for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
-            if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
-                i++;
-        }
-        pthread_mutex_unlock(&ebpf_exit_cleanup);
-    }
+    ebpf_check_before2go();
 
     pthread_mutex_lock(&ebpf_exit_cleanup);
     ebpf_unload_unique_maps();
@@ -2650,7 +2627,7 @@ int main(int argc, char **argv)
         (void)heartbeat_next(&hb, step);
 
         pthread_mutex_lock(&ebpf_exit_cleanup);
-        if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING && process_pid_fd != -1) {
+        if (process_pid_fd != -1) {
             pthread_mutex_lock(&collect_data_mutex);
            if (++update_apps_list == update_apps_every) {
                update_apps_list = 0;
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index 5bbbe1f43..c287136cf 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -336,27 +336,6 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
  *
  *****************************************************************/
 
-/**
- * Cachestat Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_cachestat_free(ebpf_module_t *em)
-{
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-
-    freez(cachestat_vector);
-    freez(cachestat_values);
-
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * Cachestat exit.
  *
@@ -368,7 +347,18 @@ static void ebpf_cachestat_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
 
-    ebpf_cachestat_free(em);
+#ifdef LIBBPF_MAJOR_VERSION
+    if (cachestat_bpf_obj)
+        cachestat_bpf__destroy(cachestat_bpf_obj);
+#endif
+
+    if (em->objects) {
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+    }
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /*****************************************************************
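/*
 * The module exit handlers rewritten in this patch all converge on one shape:
 * the CO-RE-capable modules (cachestat above; dcstat, fd, mount, shm, swap and
 * vfs below) first destroy their libbpf skeleton, every module then unloads its
 * legacy object if one was attached, and finally marks itself stopped under
 * ebpf_exit_cleanup so ebpf.c no longer frees per-module state centrally.
 * A minimal sketch of that shape; "example" and example_bpf_obj are placeholder
 * names used for illustration, not real netdata identifiers.
 */
#ifdef LIBBPF_MAJOR_VERSION
static struct example_bpf *example_bpf_obj;          // hypothetical CO-RE skeleton handle
#endif

static void ebpf_example_exit(void *ptr)
{
    ebpf_module_t *em = (ebpf_module_t *)ptr;

#ifdef LIBBPF_MAJOR_VERSION
    if (example_bpf_obj)                              // CO-RE mode: free the skeleton
        example_bpf__destroy(example_bpf_obj);
#endif

    if (em->objects)                                  // legacy mode: detach probes, unload object
        ebpf_unload_legacy_code(em->objects, em->probe_links);

    pthread_mutex_lock(&ebpf_exit_cleanup);
    em->enabled = NETDATA_THREAD_EBPF_STOPPED;        // ebpf.c then skips this module on global unload
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}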
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 5a07e4619..4157f0c87 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -286,55 +286,29 @@ void dcstat_update_publish(netdata_publish_dcstat_t *out, uint64_t cache_access,
  *****************************************************************/
 
 /**
- * Clean names
- *
- * Clean the optional names allocated during startup.
- */
-void ebpf_dcstat_clean_names()
-{
-    size_t i = 0;
-    while (dc_optional_name[i].program_name) {
-        freez(dc_optional_name[i].optional);
-        i++;
-    }
-}
-
-/**
- * DCstat Free
+ * DCstat exit
  *
- * Cleanup variables after child threads to stop
+ * Cancel child and exit.
  *
  * @param ptr thread data.
  */
-static void ebpf_dcstat_free(ebpf_module_t *em )
+static void ebpf_dcstat_exit(void *ptr)
 {
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
 
-    freez(dcstat_vector);
-    freez(dcstat_values);
+#ifdef LIBBPF_MAJOR_VERSION
+    if (dc_bpf_obj)
+        dc_bpf__destroy(dc_bpf_obj);
+#endif
 
-    ebpf_dcstat_clean_names();
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
 
     pthread_mutex_lock(&ebpf_exit_cleanup);
     em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
-/**
- * DCstat exit
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_dcstat_exit(void *ptr)
-{
-    ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_dcstat_free(em);
-}
-
 /*****************************************************************
  *
  *  APPS
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
index 71c972777..231186b84 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -435,17 +435,18 @@ static void ebpf_cleanup_disk_list()
 }
 
 /**
- * DISK Free
+ * Disk exit.
  *
- * Cleanup variables after child threads to stop
+ * Cancel child and exit.
  *
  * @param ptr thread data.
  */
-static void ebpf_disk_free(ebpf_module_t *em)
+static void ebpf_disk_exit(void *ptr)
 {
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
 
     ebpf_disk_disable_tracepoints();
 
@@ -463,19 +464,6 @@ static void ebpf_disk_free(ebpf_module_t *em)
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
-/**
- * Disk exit.
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_disk_exit(void *ptr)
-{
-    ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_disk_free(em);
-}
-
 /*****************************************************************
  *
  *  MAIN LOOP
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 6d3868952..d39e6ae20 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -370,39 +370,28 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
  *****************************************************************/
 
 /**
- * FD Free
+ * FD Exit
  *
- * Cleanup variables after child threads to stop
+ * Cancel child thread and exit.
  *
  * @param ptr thread data.
  */
-static void ebpf_fd_free(ebpf_module_t *em)
+static void ebpf_fd_exit(void *ptr)
 {
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
 
-    freez(fd_values);
-    freez(fd_vector);
+#ifdef LIBBPF_MAJOR_VERSION
+    if (fd_bpf_obj)
+        fd_bpf__destroy(fd_bpf_obj);
+#endif
 
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
     pthread_mutex_lock(&ebpf_exit_cleanup);
     em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
-/**
- * FD Exit
- *
- * Cancel child thread and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_fd_exit(void *ptr)
-{
-    ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_fd_free(em);
-}
-
 /*****************************************************************
  *
  *  MAIN LOOP
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
index 113648ec9..f714c261c 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -188,35 +188,28 @@ void ebpf_hardirq_release(hardirq_val_t *stat)
  *****************************************************************/
 
 /**
- * Hardirq Free
+ * Hardirq Exit
  *
- * Cleanup variables after child threads to stop
+ * Cancel child and exit.
  *
  * @param ptr thread data.
  */
-static void ebpf_hardirq_free(ebpf_module_t *em)
+static void hardirq_exit(void *ptr)
 {
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
     for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
         ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
     }
+
     pthread_mutex_lock(&ebpf_exit_cleanup);
     em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
-/**
- * Hardirq Exit
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void hardirq_exit(void *ptr)
-{
-    ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_hardirq_free(em);
-}
-
 /*****************************************************************
  *  MAIN LOOP
  *****************************************************************/
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
index 321bd97ee..65ed860a4 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -38,21 +38,6 @@ static avl_tree_lock mdflush_pub;
 // tmp store for mdflush values we get from a per-CPU eBPF map.
 static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL;
 
-/**
- * MDflush Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_mdflush_free(ebpf_module_t *em)
-{
-    freez(mdflush_ebpf_vals);
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * MDflush exit
  *
@@ -63,7 +48,13 @@ static void ebpf_mdflush_free(ebpf_module_t *em)
 static void mdflush_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_mdflush_free(em);
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /**
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
index e0951f8c4..e48c89227 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -222,20 +222,6 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_
  *
  *****************************************************************/
 
-/**
- * Mount Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_mount_free(ebpf_module_t *em)
-{
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * Mount Exit
  *
@@ -246,7 +232,17 @@ static void ebpf_mount_free(ebpf_module_t *em)
 static void ebpf_mount_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_mount_free(em);
+
+#ifdef LIBBPF_MAJOR_VERSION
+    if (mount_bpf_obj)
+        mount_bpf__destroy(mount_bpf_obj);
+#endif
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /*****************************************************************
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 094875292..c80f44873 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -52,6 +52,10 @@ static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill"
 static void oomkill_cleanup(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
     pthread_mutex_lock(&ebpf_exit_cleanup);
     em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index 093d65b60..94ac624b3 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -288,23 +288,6 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
  *  FUNCTIONS TO CLOSE THE THREAD
  *****************************************************************/
 
-/**
- * SHM Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_shm_free(ebpf_module_t *em)
-{
-    freez(shm_vector);
-    freez(shm_values);
-
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * SHM Exit
  *
@@ -315,7 +298,18 @@ static void ebpf_shm_free(ebpf_module_t *em)
 static void ebpf_shm_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_shm_free(em);
+
+#ifdef LIBBPF_MAJOR_VERSION
+    if (shm_bpf_obj)
+        shm_bpf__destroy(shm_bpf_obj);
+#endif
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /*****************************************************************
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
index 01e2d0a52..b5c77bf06 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -61,17 +61,18 @@ static softirq_val_t softirq_vals[] = {
 static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
 
 /**
- * Cachestat Free
+ * Cleanup
  *
- * Cleanup variables after child threads to stop
+ * Clean up allocated memory.
  *
  * @param ptr thread data.
  */
-static void ebpf_softirq_free(ebpf_module_t *em)
+static void softirq_cleanup(void *ptr)
 {
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
 
     for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
         ebpf_disable_tracepoint(&softirq_tracepoints[i]);
     }
@@ -83,19 +84,6 @@ static void ebpf_softirq_free(ebpf_module_t *em)
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
-/**
- * Cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void softirq_cleanup(void *ptr)
-{
-    ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_softirq_free(em);
-}
-
 /*****************************************************************
  *  MAIN LOOP
  *****************************************************************/
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index c9129a3fa..492b59678 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -229,23 +229,6 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
  *
  *****************************************************************/
 
-/**
- * Cachestat Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_swap_free(ebpf_module_t *em)
-{
-    freez(swap_vector);
-    freez(swap_values);
-
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * Swap exit
  *
@@ -256,7 +239,17 @@ static void ebpf_swap_free(ebpf_module_t *em)
 static void ebpf_swap_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_swap_free(em);
+
+#ifdef LIBBPF_MAJOR_VERSION
+    if (bpf_obj)
+        swap_bpf__destroy(bpf_obj);
+#endif
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /*****************************************************************
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index 66e9c742c..9f1c0159d 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -349,6 +349,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
     for (i = 0; local_syscalls[i].syscall; i++) {
         ebpf_sync_syscalls_t *w = &local_syscalls[i];
         w->sync_maps = local_syscalls[i].sync_maps;
+        em->maps = local_syscalls[i].sync_maps;
         if (w->enabled) {
             if (em->load & EBPF_LOAD_LEGACY) {
                 if (ebpf_sync_load_legacy(w, em))
@@ -360,20 +361,23 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
             else {
                 char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH];
                 ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH, w->syscall, running_on_kernel);
-                w->sync_obj = sync_bpf__open();
-                if (!w->sync_obj) {
-                    errors++;
-                } else {
-                    if (ebpf_is_function_inside_btf(default_btf, syscall)) {
+                if (ebpf_is_function_inside_btf(default_btf, syscall)) {
+                    w->sync_obj = sync_bpf__open();
+                    if (!w->sync_obj) {
+                        w->enabled = false;
+                        errors++;
+                    } else {
                         if (ebpf_sync_load_and_attach(w->sync_obj, em, syscall, i)) {
+                            w->enabled = false;
                             errors++;
                         }
-                    } else {
-                        if (ebpf_sync_load_legacy(w, em))
-                            errors++;
                     }
-                    em->thread_name = saved_name;
+                } else {
+                    info("Cannot find syscall %s we are not going to monitor it.", syscall);
+                    w->enabled = false;
                 }
+
+                em->thread_name = saved_name;
             }
 #endif
         }
@@ -402,7 +406,7 @@
  */
 static void ebpf_sync_read_global_table(int maps_per_core)
 {
-    netdata_idx_t stored[ebpf_nprocs];
+    netdata_idx_t stored[NETDATA_MAX_PROCESSOR];
     uint32_t idx = NETDATA_SYNC_CALL;
     int i;
     for (i = 0; local_syscalls[i].syscall; i++) {
@@ -456,7 +460,7 @@ static void ebpf_send_sync_chart(char *id,
  */
static void sync_send_data()
 {
-    if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) {
+    if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) {
         ebpf_send_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART, NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX);
     }
 
@@ -465,7 +469,7 @@ static void sync_send_data()
                       sync_counter_publish_aggregated[NETDATA_SYNC_MSYNC_IDX].dimension,
                       sync_hash_values[NETDATA_SYNC_MSYNC_IDX]);
 
-    if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) {
+    if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) {
         ebpf_send_sync_chart(NETDATA_EBPF_SYNC_CHART, NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX);
     }
 
@@ -551,7 +555,7 @@ static void ebpf_create_sync_chart(char *id,
  */
 static void ebpf_create_sync_charts(int update_every)
 {
-    if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled)
+    if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled)
         ebpf_create_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART,
                                "Monitor calls for fsync(2) and fdatasync(2).", 21300,
                                NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX, update_every);
@@ -561,7 +565,7 @@ static void ebpf_create_sync_charts(int update_every)
                            "Monitor calls for msync(2).", 21301,
                            NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX, update_every);
 
-    if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled)
+    if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled)
         ebpf_create_sync_chart(NETDATA_EBPF_SYNC_CHART,
                                "Monitor calls for sync(2) and syncfs(2).", 21302,
                                NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX, update_every);
@@ -616,7 +620,6 @@ void *ebpf_sync_thread(void *ptr)
     netdata_thread_cleanup_push(ebpf_sync_exit, ptr);
 
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    em->maps = sync_maps;
 
     ebpf_set_sync_maps();
     ebpf_sync_parse_syscalls();
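/*
 * In ebpf_sync.c above, the CO-RE path now checks BTF for each syscall before
 * opening a skeleton, and a syscall that cannot be found is disabled instead
 * of silently falling back to legacy probes. A condensed sketch of that
 * decision, reusing the names from the hunk (ebpf_is_function_inside_btf,
 * sync_bpf__open, ebpf_sync_load_and_attach); the helper function itself is
 * illustrative only and does not exist in the tree.
 */
static int ebpf_sync_try_core(ebpf_sync_syscalls_t *w, ebpf_module_t *em, char *syscall, int idx)
{
    if (!ebpf_is_function_inside_btf(default_btf, syscall)) {
        info("Cannot find syscall %s we are not going to monitor it.", syscall);
        w->enabled = false;                   // drop the syscall, do not try legacy probes
        return 0;
    }

    w->sync_obj = sync_bpf__open();
    if (!w->sync_obj || ebpf_sync_load_and_attach(w->sync_obj, em, syscall, idx)) {
        w->enabled = false;                   // open or attach failed: caller counts an error
        return -1;
    }

    return 0;
}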
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index bfc7ee8f7..6cafafc38 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -403,23 +403,6 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
  *
  *****************************************************************/
 
-/**
- * Cachestat Free
- *
- * Cleanup variables after child threads to stop
- *
- * @param ptr thread data.
- */
-static void ebpf_vfs_free(ebpf_module_t *em)
-{
-    freez(vfs_hash_values);
-    freez(vfs_vector);
-
-    pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
-    pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
 /**
  * Exit
  *
@@ -430,7 +413,17 @@ static void ebpf_vfs_free(ebpf_module_t *em)
 static void ebpf_vfs_exit(void *ptr)
 {
     ebpf_module_t *em = (ebpf_module_t *)ptr;
-    ebpf_vfs_free(em);
+
+#ifdef LIBBPF_MAJOR_VERSION
+    if (vfs_bpf_obj)
+        vfs_bpf__destroy(vfs_bpf_obj);
+#endif
+    if (em->objects)
+        ebpf_unload_legacy_code(em->objects, em->probe_links);
+
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
 /*****************************************************************
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
index 9e8127cb6..16881d170 100644
--- a/collectors/proc.plugin/proc_net_dev.c
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -57,6 +57,8 @@ static struct netdev {
     int configured;
     int enabled;
     int updated;
+
+    time_t discover_time;
 
     int carrier_file_exists;
     time_t carrier_file_lost_time;
@@ -529,6 +531,7 @@ static inline void netdev_rename(struct netdev *d) {
     if(unlikely(r && !r->processed)) {
         netdev_rename_cgroup(d, r);
         r->processed = 1;
+        d->discover_time = 0;
         netdev_pending_renames--;
     }
 }
@@ -671,6 +674,8 @@ static struct netdev *get_netdev(const char *name) {
     return d;
 }
 
+#define NETDEV_VIRTUAL_COLLECT_DELAY 15 // 1 full run of the cgroups discovery thread (10 secs by default)
+
 int do_proc_net_dev(int update_every, usec_t dt) {
     (void)dt;
     static SIMPLE_PATTERN *disabled_list = NULL;
@@ -747,6 +752,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
     kernel_uint_t system_rbytes = 0;
     kernel_uint_t system_tbytes = 0;
 
+    time_t now = now_realtime_sec();
+
     size_t lines = procfile_lines(ff), l;
     for(l = 2; l < lines ;l++) {
         // require 17 words on each line
@@ -765,6 +772,7 @@ int do_proc_net_dev(int update_every, usec_t dt) {
 
             // remember we configured it
             d->configured = 1;
+            d->discover_time = now;
 
             d->enabled = enable_new_interfaces;
 
@@ -826,6 +834,14 @@ int do_proc_net_dev(int update_every, usec_t dt) {
         if(unlikely(!d->enabled))
             continue;
 
+        // See https://github.com/netdata/netdata/issues/15206
+        // This is necessary to prevent the creation of charts for virtual interfaces that will later be
+        // recreated as container interfaces (create container) or
+        // rediscovered and recreated only to be deleted almost immediately (stop/remove container)
+        if (d->virtual && (now - d->discover_time < NETDEV_VIRTUAL_COLLECT_DELAY)) {
+            continue;
+        }
+
         if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) {
             d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1));
             d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9));
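/*
 * The proc_net_dev.c change above stamps d->discover_time whenever an
 * interface is (re)configured and skips virtual interfaces for
 * NETDEV_VIRTUAL_COLLECT_DELAY seconds, so short-lived veth devices created
 * and deleted by containers never get charts; netdev_rename() resets the
 * stamp to 0, which lets a renamed container interface pass the check
 * immediately. A reduced sketch of that gate; the helper name is illustrative
 * only, while the fields and the macro come from the hunk above.
 */
static int netdev_ready_to_collect(struct netdev *d, time_t now)
{
    // freshly discovered virtual interfaces wait out one cgroups-discovery run
    if (d->virtual && (now - d->discover_time < NETDEV_VIRTUAL_COLLECT_DELAY))
        return 0;

    return 1;
}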
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index 6affae7b8..271c99638 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -17,6 +17,8 @@ disabled_by_default = True
 
 NVIDIA_SMI = 'nvidia-smi'
 
+NOT_AVAILABLE = 'N/A'
+
 EMPTY_ROW = ''
 EMPTY_ROW_LIMIT = 500
 POLLER_BREAK_ROW = ''
@@ -481,13 +483,14 @@ class GPU:
             'power_draw': self.power_draw(),
         }
 
-        pci_bw_max = self.pci_bw_max()
-        if not pci_bw_max:
-            data['rx_util_percent'] = 0
-            data['tx_util_percent'] = 0
-        else :
-            data['rx_util_percent'] = str(int(int(self.rx_util())*100/self.pci_bw_max()))
-            data['tx_util_percent'] = str(int(int(self.tx_util())*100/self.pci_bw_max()))
+        if self.rx_util() != NOT_AVAILABLE and self.tx_util() != NOT_AVAILABLE:
+            pci_bw_max = self.pci_bw_max()
+            if not pci_bw_max:
+                data['rx_util_percent'] = 0
+                data['tx_util_percent'] = 0
+            else:
+                data['rx_util_percent'] = str(int(int(self.rx_util()) * 100 / self.pci_bw_max()))
+                data['tx_util_percent'] = str(int(int(self.tx_util()) * 100 / self.pci_bw_max()))
 
         for v in POWER_STATES:
-- 
cgit v1.2.3