path: root/collectors/ebpf.plugin
author    Daniel Baumann <daniel.baumann@progress-linux.org> 2023-05-08 16:27:04 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2023-05-08 16:27:04 +0000
commit    a836a244a3d2bdd4da1ee2641e3e957850668cea (patch)
tree      cb87c75b3677fab7144f868435243f864048a1e6 /collectors/ebpf.plugin
parent    Adding upstream version 1.38.1. (diff)
download  netdata-a836a244a3d2bdd4da1ee2641e3e957850668cea.tar.xz
          netdata-a836a244a3d2bdd4da1ee2641e3e957850668cea.zip
Adding upstream version 1.39.0. (upstream/1.39.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/README.md         |   4
-rw-r--r--  collectors/ebpf.plugin/ebpf.c            | 430
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf       |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf.h            |  40
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c       | 510
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h       | 345
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c  |  54
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h  |   4
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.c     | 145
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.h     |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c     |  52
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.h     |   4
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c       |   9
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c         |  77
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.h         |   5
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c |  21
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c    | 195
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.h    |  12
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c    |  10
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c      |  33
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c    |  44
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c    | 237
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h    |  11
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c        |  57
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.h        |   6
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c     | 103
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.h     |   4
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c    |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c       |  32
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.h       |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c       |   7
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c        |  82
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.h        |   6
-rw-r--r--  collectors/ebpf.plugin/metrics.csv       | 197
34 files changed, 1756 insertions, 993 deletions
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index deedf4d79..75f44a6e5 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -5,10 +5,10 @@ custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf
sidebar_label: "Kernel traces/metrics (eBPF)"
learn_status: "Published"
learn_topic_type: "References"
-learn_rel_path: "References/Collectors references/System metrics"
+learn_rel_path: "Integrations/Monitor/System metrics"
-->
-# eBPF monitoring with Netdata
+# Kernel traces/metrics (eBPF) collector
The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampoline, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture.
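For orientation, here is a minimal, hedged sketch of how a kprobe is attached with libbpf in general; the object path, program name, and traced symbol are placeholders and are not taken from this commit or from the plugin's actual loader.

/* Minimal libbpf example: load a BPF object and attach one program as a
 * kprobe. Everything named here (probe_kern.o, netdata_probe, vfs_write)
 * is a placeholder, not ebpf.plugin code. */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
    struct bpf_object *obj = bpf_object__open_file("probe_kern.o", NULL);
    if (!obj) {
        fprintf(stderr, "cannot open BPF object\n");
        return 1;
    }

    if (bpf_object__load(obj)) {
        fprintf(stderr, "cannot load BPF object into the kernel\n");
        bpf_object__close(obj);
        return 1;
    }

    struct bpf_program *prog = bpf_object__find_program_by_name(obj, "netdata_probe");
    /* second argument false = kprobe on function entry; true would request a kretprobe */
    struct bpf_link *link = prog ? bpf_program__attach_kprobe(prog, false, "vfs_write") : NULL;
    if (!link)
        fprintf(stderr, "cannot attach kprobe\n");

    /* a real collector would now read the program's maps periodically */

    bpf_link__destroy(link);
    bpf_object__close(obj);
    return 0;
}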
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 67fe477c2..c0764c600 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -28,11 +28,22 @@ int running_on_kernel = 0;
int ebpf_nprocs;
int isrh = 0;
int main_thread_id = 0;
+int process_pid_fd = -1;
pthread_mutex_t lock;
pthread_mutex_t ebpf_exit_cleanup;
pthread_mutex_t collect_data_mutex;
-pthread_cond_t collect_data_cond_var;
+
+struct netdata_static_thread cgroup_integration_thread = {
+ .name = "EBPF CGROUP INT",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
ebpf_module_t ebpf_modules[] = {
{ .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
@@ -435,9 +446,6 @@ ebpf_sync_syscalls_t local_syscalls[] = {
};
-// Link with apps.plugin
-ebpf_process_stat_t *global_process_stat = NULL;
-
// Link with cgroup.plugin
netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
int shm_fd_ebpf_cgroup = -1;
@@ -449,10 +457,19 @@ ebpf_network_viewer_options_t network_viewer_opt;
// Statistic
ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0,
- .probes = 0, .retprobes = 0, .trampolines = 0};
+ .probes = 0, .retprobes = 0, .trampolines = 0, .memlock_kern = 0,
+ .hash_tables = 0};
#ifdef LIBBPF_MAJOR_VERSION
struct btf *default_btf = NULL;
+struct cachestat_bpf *cachestat_bpf_obj = NULL;
+struct dc_bpf *dc_bpf_obj = NULL;
+struct fd_bpf *fd_bpf_obj = NULL;
+struct mount_bpf *mount_bpf_obj = NULL;
+struct shm_bpf *shm_bpf_obj = NULL;
+struct socket_bpf *socket_bpf_obj = NULL;
+struct swap_bpf *bpf_obj = NULL;
+struct vfs_bpf *vfs_bpf_obj = NULL;
#else
void *default_btf = NULL;
#endif
@@ -460,6 +477,35 @@ char *btf_path = NULL;
/*****************************************************************
*
+ * FUNCTIONS USED TO ALLOCATE APPS/CGROUP MEMORIES (ARAL)
+ *
+ *****************************************************************/
+
+/**
+ * Allocate PID ARAL
+ *
+ * Allocate memory using ARAL functions to speed up processing.
+ *
+ * @param name the internal name used for allocated region.
+ * @param size size of each element inside allocated space
+ *
+ * @return It returns the address on success and NULL otherwise.
+ */
+ARAL *ebpf_allocate_pid_aral(char *name, size_t size)
+{
+ static size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
+ if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
+ error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
+ max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
+ }
+
+ return aral_create(name, size,
+ 0, max_elements,
+ NULL, NULL, NULL, false, false);
+}
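As a hedged illustration of how the regions returned by ebpf_allocate_pid_aral() are meant to be consumed (the same get/zero/release pattern the apps code adopts further down in this diff), the structure and region name below are invented for the example:

/* Example only: my_stat_t and "ebpf_example_stat" are hypothetical. */
typedef struct {
    uint32_t pid;
    uint64_t calls;
} my_stat_t;

static ARAL *my_region = NULL;

static void my_region_init(void)
{
    my_region = ebpf_allocate_pid_aral("ebpf_example_stat", sizeof(my_stat_t));
}

static my_stat_t *my_stat_get(void)
{
    my_stat_t *s = aral_mallocz(my_region); /* fixed-size slot taken from the region */
    memset(s, 0, sizeof(*s));               /* ARAL slots are not zeroed on allocation */
    return s;
}

static void my_stat_release(my_stat_t *s)
{
    aral_freez(my_region, s);               /* slot goes back to the region, not to the heap */
}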
+
+/*****************************************************************
+ *
* FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
*
*****************************************************************/
@@ -488,10 +534,12 @@ static void ebpf_exit()
#endif
printf("DISABLE\n");
+ pthread_mutex_lock(&mutex_cgroup_shm);
if (shm_ebpf_cgroup.header) {
- munmap(shm_ebpf_cgroup.header, shm_ebpf_cgroup.header->body_length);
+ ebpf_unmap_cgroup_shared_memory();
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
}
+ pthread_mutex_unlock(&mutex_cgroup_shm);
exit(0);
}
@@ -518,6 +566,126 @@ static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link
bpf_object__close(objects);
}
+/**
+ * Unload Unique maps
+ *
+ * This function unload all BPF maps from threads using one unique BPF object.
+ */
+static void ebpf_unload_unique_maps()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
+ if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
+ error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name);
+
+ continue;
+ }
+
+ ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
+ switch (i) {
+ case EBPF_MODULE_CACHESTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (cachestat_bpf_obj)
+ cachestat_bpf__destroy(cachestat_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_DCSTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (dc_bpf_obj)
+ dc_bpf__destroy(dc_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_FD_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (fd_bpf_obj)
+ fd_bpf__destroy(fd_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_MOUNT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (mount_bpf_obj)
+ mount_bpf__destroy(mount_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SHM_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (shm_bpf_obj)
+ shm_bpf__destroy(shm_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SOCKET_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (socket_bpf_obj)
+ socket_bpf__destroy(socket_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_SWAP_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (bpf_obj)
+ swap_bpf__destroy(bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_VFS_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+ if (vfs_bpf_obj)
+ vfs_bpf__destroy(vfs_bpf_obj);
+#endif
+ break;
+ }
+ case EBPF_MODULE_PROCESS_IDX:
+ case EBPF_MODULE_DISK_IDX:
+ case EBPF_MODULE_HARDIRQ_IDX:
+ case EBPF_MODULE_SOFTIRQ_IDX:
+ case EBPF_MODULE_OOMKILL_IDX:
+ case EBPF_MODULE_MDFLUSH_IDX:
+ default:
+ continue;
+ }
+ }
+}
+
+/**
+ * Unload filesystem maps
+ *
+ * This function unload all BPF maps from filesystem thread.
+ */
+static void ebpf_unload_filesystems()
+{
+ if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+ ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+ return;
+
+ int i;
+ for (i = 0; localfs[i].filesystem != NULL; i++) {
+ ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
+ }
+}
+
+/**
+ * Unload sync maps
+ *
+ * This function unload all BPF maps from sync thread.
+ */
+static void ebpf_unload_sync()
+{
+ if (ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+ ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+ return;
+
+ int i;
+ for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+ ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+ }
+}
+
int ebpf_exit_plugin = 0;
/**
* Close the collector gracefully
@@ -529,7 +697,6 @@ static void ebpf_stop_threads(int sig)
UNUSED(sig);
static int only_one = 0;
- int i;
// Child thread should be closed by itself.
pthread_mutex_lock(&ebpf_exit_cleanup);
if (main_thread_id != gettid() || only_one) {
@@ -537,13 +704,26 @@ static void ebpf_stop_threads(int sig)
return;
}
only_one = 1;
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- if (ebpf_threads[i].enabled != NETDATA_THREAD_EBPF_STOPPED)
- netdata_thread_cancel(*ebpf_threads[i].thread);
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name != NULL; i++) {
+ if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING) {
+ netdata_thread_cancel(*ebpf_modules[i].thread->thread);
+#ifdef NETDATA_DEV_MODE
+ info("Sending cancel for thread %s", ebpf_modules[i].thread_name);
+#endif
+ }
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ netdata_thread_cancel(*cgroup_integration_thread.thread);
+#ifdef NETDATA_DEV_MODE
+ info("Sending cancel for thread %s", cgroup_integration_thread.name);
+#endif
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+
ebpf_exit_plugin = 1;
+
usec_t max = USEC_PER_SEC, step = 100000;
while (i && max) {
max -= step;
@@ -551,42 +731,18 @@ static void ebpf_stop_threads(int sig)
i = 0;
int j;
pthread_mutex_lock(&ebpf_exit_cleanup);
- for (j = 0; ebpf_threads[j].name != NULL; j++) {
- if (ebpf_threads[j].enabled != NETDATA_THREAD_EBPF_STOPPED)
+ for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
+ if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
i++;
}
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
- if (!i) {
- //Unload threads(except sync and filesystem)
- pthread_mutex_lock(&ebpf_exit_cleanup);
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
- i != EBPF_MODULE_SYNC_IDX)
- ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- //Unload filesystem
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; localfs[i].filesystem != NULL; i++) {
- ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- //Unload Sync
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; local_syscalls[i].syscall != NULL; i++) {
- ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- }
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ ebpf_unload_unique_maps();
+ ebpf_unload_filesystems();
+ ebpf_unload_sync();
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_exit();
}
@@ -598,6 +754,58 @@ static void ebpf_stop_threads(int sig)
*****************************************************************/
/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param root a pointer for the targets.
+ */
+static void ebpf_create_apps_charts(struct ebpf_target *root)
+{
+ if (unlikely(!ebpf_all_pids))
+ return;
+
+ struct ebpf_target *w;
+ int newly_added = 0;
+
+ for (w = root; w; w = w->next) {
+ if (w->target)
+ continue;
+
+ if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+ struct ebpf_pid_on_target *pid_on_target;
+
+ fprintf(
+ stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
+ (w->processes == 1) ? "" : "es");
+
+ for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+ fprintf(stderr, " %d", pid_on_target->pid);
+ }
+
+ fputc('\n', stderr);
+ }
+
+ if (!w->exposed && w->processes) {
+ newly_added++;
+ w->exposed = 1;
+ if (debug_enabled || w->debug_enabled)
+ debug_log_int("%s just added - regenerating charts.", w->name);
+ }
+ }
+
+ if (!newly_added)
+ return;
+
+ int counter;
+ for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
+ ebpf_module_t *current = &ebpf_modules[counter];
+ if (current->enabled == NETDATA_THREAD_EBPF_RUNNING && current->apps_charts && current->apps_routine)
+ current->apps_routine(current, root);
+ }
+}
+
+/**
* Get a value from a structure.
*
* @param basis it is the first address of the structure
@@ -876,9 +1084,9 @@ void ebpf_create_chart(char *type,
* @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, struct target *root, int update_every, char *module)
+ char *algorithm, struct ebpf_target *root, int update_every, char *module)
{
- struct target *w;
+ struct ebpf_target *w;
ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order,
update_every, module);
@@ -913,6 +1121,79 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist,
fflush(stdout);
}
+/**
+ * ARAL Charts
+ *
+ * Add chart to monitor ARAL usage
+ * Caller must call this function with mutex locked.
+ *
+ * @param name the name used to create aral
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
+{
+ static int priority = 140100;
+ char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
+ char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
+
+ snprintfz(em->memory_usage, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_size", name);
+ snprintfz(em->memory_allocations, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_alloc", name);
+
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ em->memory_usage,
+ "Bytes allocated for ARAL.",
+ "bytes",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ "netdata.ebpf_aral_stat_size",
+ priority++,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(mem,
+ mem,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ em->memory_allocations,
+ "Calls to allocate memory.",
+ "calls",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ "netdata.ebpf_aral_stat_alloc",
+ priority++,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(aral,
+ aral,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
+ * Send data from aral chart
+ *
+ * Send data for eBPF plugin
+ *
+ * @param memory a pointer to the allocated address
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
+{
+ char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
+ char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
+
+ struct aral_statistics *stats = aral_statistics(memory);
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage);
+ write_chart_dimension(mem, (long long)stats->structures.allocated_bytes);
+ write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations);
+ write_chart_dimension(aral, (long long)stats->structures.allocations);
+ write_end_chart();
+}
+
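A hedged sketch of how these two helpers fit together in a module thread; the lock usage and call sites are inferred from the comments above ("Caller must call this function with mutex locked"), not copied from the commit:

/* Illustration only: example_module_start_aral_monitoring is hypothetical. */
static void example_module_start_aral_monitoring(ebpf_module_t *em, ARAL *region)
{
    pthread_mutex_lock(&lock);                       /* chart definitions expect the plugin lock held */
    ebpf_statistic_create_aral_chart("process", em); /* defines aral_process_size / aral_process_alloc */
    pthread_mutex_unlock(&lock);

    while (!ebpf_exit_plugin) {
        /* ... collect the module's own metrics here ... */
        ebpf_send_data_aral_chart(region, em);       /* feed both ARAL charts each cycle */
        sleep(em->update_every);
    }
}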
/*****************************************************************
*
* FUNCTIONS TO DEFINE OPTIONS
@@ -944,7 +1225,7 @@ void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *p
pio[i].dimension = dim[i];
pio[i].name = name[i];
- pio[i].algorithm = strdupz(ebpf_algorithms[algorithm[i]]);
+ pio[i].algorithm = ebpf_algorithms[algorithm[i]];
if (publish_prev) {
publish_prev->next = &pio[i];
}
@@ -1342,21 +1623,13 @@ static void read_local_addresses()
* Start Pthread Variable
*
* This function starts all pthread variables.
- *
- * @return It returns 0 on success and -1.
*/
-int ebpf_start_pthread_variables()
+void ebpf_start_pthread_variables()
{
pthread_mutex_init(&lock, NULL);
pthread_mutex_init(&ebpf_exit_cleanup, NULL);
pthread_mutex_init(&collect_data_mutex, NULL);
-
- if (pthread_cond_init(&collect_data_cond_var, NULL)) {
- error("Cannot start conditional variable to control Apps charts.");
- return -1;
- }
-
- return 0;
+ pthread_mutex_init(&mutex_cgroup_shm, NULL);
}
/**
@@ -1386,8 +1659,8 @@ static void ebpf_allocate_common_vectors()
return;
}
- all_pids = callocz((size_t)pid_max, sizeof(struct pid_stat *));
- global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t));
+ ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *));
+ ebpf_aral_init();
}
/**
@@ -1720,8 +1993,9 @@ void set_global_variables()
ebpf_configured_log_dir = LOG_DIR;
ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
- if (ebpf_nprocs > NETDATA_MAX_PROCESSOR) {
+ if (ebpf_nprocs < 0) {
ebpf_nprocs = NETDATA_MAX_PROCESSOR;
+ error("Cannot identify number of process, using default value %d", ebpf_nprocs);
}
isrh = get_redhat_release();
@@ -2088,7 +2362,7 @@ static pid_t ebpf_read_previous_pid(char *filename)
length = 63;
buffer[length] = '\0';
- old_pid = (pid_t)str2uint32_t(buffer);
+ old_pid = (pid_t) str2uint32_t(buffer, NULL);
}
fclose(fp);
@@ -2219,10 +2493,7 @@ int main(int argc, char **argv)
signal(SIGTERM, ebpf_stop_threads);
signal(SIGPIPE, ebpf_stop_threads);
- if (ebpf_start_pthread_variables()) {
- error("Cannot start mutex to control overall charts.");
- ebpf_exit();
- }
+ ebpf_start_pthread_variables();
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
if(verify_netdata_host_prefix() == -1) ebpf_exit(6);
@@ -2241,6 +2512,12 @@ int main(int argc, char **argv)
ebpf_set_static_routine();
+ cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
+ cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
+
+ netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
+ NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
+
int i;
for (i = 0; ebpf_threads[i].name != NULL; i++) {
struct netdata_static_thread *st = &ebpf_threads[i];
@@ -2251,30 +2528,37 @@ int main(int argc, char **argv)
if (em->enabled || !i) {
st->thread = mallocz(sizeof(netdata_thread_t));
em->thread_id = i;
- st->enabled = NETDATA_THREAD_EBPF_RUNNING;
+ em->enabled = NETDATA_THREAD_EBPF_RUNNING;
netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
} else {
- st->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
}
}
usec_t step = USEC_PER_SEC;
- int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
heartbeat_t hb;
heartbeat_init(&hb);
+ int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
+ int update_apps_list = update_apps_every - 1;
//Plugin will be killed when it receives a signal
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, step);
- // We are using a small heartbeat time to wake up thread,
- // but we should not update so frequently the shared memory data
- if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
- counter = 0;
- if (!shm_ebpf_cgroup.header)
- ebpf_map_cgroup_shared_memory();
-
- ebpf_parse_cgroup_shm_data();
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING && process_pid_fd != -1) {
+ pthread_mutex_lock(&collect_data_mutex);
+ if (++update_apps_list == update_apps_every) {
+ update_apps_list = 0;
+ cleanup_exited_pids();
+ collect_data_for_all_processes(process_pid_fd);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_apps_charts(apps_groups_root_target);
+ pthread_mutex_unlock(&lock);
+ }
+ pthread_mutex_unlock(&collect_data_mutex);
}
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
ebpf_stop_threads(0);
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index 112df275d..6a5ec5c39 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -55,7 +55,7 @@
disk = no
fd = yes
filesystem = no
- hardirq = yes
+ hardirq = no
mdflush = no
mount = yes
oomkill = yes
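For context, this is roughly how the touched block reads in ebpf.d.conf after the change; the section header is assumed from the upstream file layout and only the keys visible in the hunk are shown:

[ebpf programs]
    disk = no
    fd = yes
    filesystem = no
    hardirq = no
    mdflush = no
    mount = yes
    oomkill = yes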
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 16e62498c..5b48adc62 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -36,6 +36,26 @@
#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
+#ifdef LIBBPF_MAJOR_VERSION // BTF code
+#include "includes/cachestat.skel.h"
+#include "includes/dc.skel.h"
+#include "includes/fd.skel.h"
+#include "includes/mount.skel.h"
+#include "includes/shm.skel.h"
+#include "includes/socket.skel.h"
+#include "includes/swap.skel.h"
+#include "includes/vfs.skel.h"
+
+extern struct cachestat_bpf *cachestat_bpf_obj;
+extern struct dc_bpf *dc_bpf_obj;
+extern struct fd_bpf *fd_bpf_obj;
+extern struct mount_bpf *mount_bpf_obj;
+extern struct shm_bpf *shm_bpf_obj;
+extern struct socket_bpf *socket_bpf_obj;
+extern struct swap_bpf *bpf_obj;
+extern struct vfs_bpf *vfs_bpf_obj;
+#endif
+
typedef struct netdata_syscall_stat {
unsigned long bytes; // total number of bytes
uint64_t call; // total number of calls
@@ -108,12 +128,6 @@ typedef struct ebpf_tracepoint {
char *event;
} ebpf_tracepoint_t;
-enum ebpf_threads_status {
- NETDATA_THREAD_EBPF_RUNNING,
- NETDATA_THREAD_EBPF_STOPPING,
- NETDATA_THREAD_EBPF_STOPPED
-};
-
// Copied from musl header
#ifndef offsetof
#if __GNUC__ > 3
@@ -143,6 +157,8 @@ enum ebpf_threads_status {
// Statistics charts
#define NETDATA_EBPF_THREADS "ebpf_threads"
#define NETDATA_EBPF_LOAD_METHOD "ebpf_load_methods"
+#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
+#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -176,9 +192,9 @@ extern int ebpf_nprocs;
extern int running_on_kernel;
extern int isrh;
extern char *ebpf_plugin_dir;
+extern int process_pid_fd;
extern pthread_mutex_t collect_data_mutex;
-extern pthread_cond_t collect_data_cond_var;
// Common functions
void ebpf_global_labels(netdata_syscall_stat_t *is,
@@ -235,14 +251,12 @@ void ebpf_create_charts_on_apps(char *name,
char *charttype,
int order,
char *algorithm,
- struct target *root,
+ struct ebpf_target *root,
int update_every,
char *module);
void write_end_chart();
-void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
-
int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);
@@ -264,16 +278,15 @@ void ebpf_pid_file(char *filename, size_t length);
// Common variables
extern int debug_enabled;
-extern struct pid_stat *root_of_pids;
+extern struct ebpf_pid_stat *ebpf_root_of_pids;
extern ebpf_cgroup_target_t *ebpf_cgroup_pids;
extern char *ebpf_algorithms[];
extern struct config collector_config;
-extern ebpf_process_stat_t *global_process_stat;
extern netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup;
extern int shm_fd_ebpf_cgroup;
extern sem_t *shm_sem_ebpf_cgroup;
extern pthread_mutex_t mutex_cgroup_shm;
-extern size_t all_pids_count;
+extern size_t ebpf_all_pids_count;
extern ebpf_plugin_stats_t plugin_statistics;
#ifdef LIBBPF_MAJOR_VERSION
extern struct btf *default_btf;
@@ -293,6 +306,7 @@ void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, c
char *charttype, char *context, int order, int update_every);
void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
+ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern int ebpf_exit_plugin;
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index 7519e0640..d6db4c676 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -5,6 +5,344 @@
#include "ebpf_apps.h"
// ----------------------------------------------------------------------------
+// ARAL vectors used to speed up processing
+ARAL *ebpf_aral_apps_pid_stat = NULL;
+ARAL *ebpf_aral_process_stat = NULL;
+ARAL *ebpf_aral_socket_pid = NULL;
+ARAL *ebpf_aral_cachestat_pid = NULL;
+ARAL *ebpf_aral_dcstat_pid = NULL;
+ARAL *ebpf_aral_vfs_pid = NULL;
+ARAL *ebpf_aral_fd_pid = NULL;
+ARAL *ebpf_aral_shm_pid = NULL;
+
+// ----------------------------------------------------------------------------
+// Global vectors used with apps
+ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
+netdata_publish_cachestat_t **cachestat_pid = NULL;
+netdata_publish_dcstat_t **dcstat_pid = NULL;
+netdata_publish_swap_t **swap_pid = NULL;
+netdata_publish_vfs_t **vfs_pid = NULL;
+netdata_fd_stat_t **fd_pid = NULL;
+netdata_publish_shm_t **shm_pid = NULL;
+ebpf_process_stat_t **global_process_stats = NULL;
+
+/**
+ * eBPF ARAL Init
+ *
+ * Initiallize array allocator that will be used when integration with apps and ebpf is created.
+ */
+void ebpf_aral_init(void)
+{
+ size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
+ if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
+ error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
+ max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
+ }
+
+ ebpf_aral_apps_pid_stat = ebpf_allocate_pid_aral("ebpf_pid_stat", sizeof(struct ebpf_pid_stat));
+
+ ebpf_aral_process_stat = ebpf_allocate_pid_aral(NETDATA_EBPF_PROC_ARAL_NAME, sizeof(ebpf_process_stat_t));
+
+#ifdef NETDATA_DEV_MODE
+ info("Plugin is using ARAL with values %d", NETDATA_EBPF_ALLOC_MAX_PID);
+#endif
+}
+
+/**
+ * eBPF pid stat get
+ *
+ * Get a ebpf_pid_stat entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+struct ebpf_pid_stat *ebpf_pid_stat_get(void)
+{
+ struct ebpf_pid_stat *target = aral_mallocz(ebpf_aral_apps_pid_stat);
+ memset(target, 0, sizeof(struct ebpf_pid_stat));
+ return target;
+}
+
+/**
+ * eBPF target release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
+{
+ aral_freez(ebpf_aral_apps_pid_stat, stat);
+}
+
+/*****************************************************************
+ *
+ * PROCESS ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF process stat get
+ *
+ * Get a ebpf_pid_stat entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+ebpf_process_stat_t *ebpf_process_stat_get(void)
+{
+ ebpf_process_stat_t *target = aral_mallocz(ebpf_aral_process_stat);
+ memset(target, 0, sizeof(ebpf_process_stat_t));
+ return target;
+}
+
+/**
+ * eBPF process release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_process_stat_release(ebpf_process_stat_t *stat)
+{
+ aral_freez(ebpf_aral_process_stat, stat);
+}
+
+/*****************************************************************
+ *
+ * SOCKET ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF socket Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_socket_aral_init()
+{
+ ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
+}
+
+/**
+ * eBPF socket get
+ *
+ * Get a ebpf_socket_publish_apps_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
+{
+ ebpf_socket_publish_apps_t *target = aral_mallocz(ebpf_aral_socket_pid);
+ memset(target, 0, sizeof(ebpf_socket_publish_apps_t));
+ return target;
+}
+
+/**
+ * eBPF socket release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_socket_release(ebpf_socket_publish_apps_t *stat)
+{
+ aral_freez(ebpf_aral_socket_pid, stat);
+}
+
+/*****************************************************************
+ *
+ * CACHESTAT ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF Cachestat Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_cachestat_aral_init()
+{
+ ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
+}
+
+/**
+ * eBPF publish cachestat get
+ *
+ * Get a netdata_publish_cachestat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
+{
+ netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
+ memset(target, 0, sizeof(netdata_publish_cachestat_t));
+ return target;
+}
+
+/**
+ * eBPF cachestat release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
+{
+ aral_freez(ebpf_aral_cachestat_pid, stat);
+}
+
+/*****************************************************************
+ *
+ * DCSTAT ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF directory cache Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_dcstat_aral_init()
+{
+ ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
+}
+
+/**
+ * eBPF publish dcstat get
+ *
+ * Get a netdata_publish_dcstat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
+{
+ netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
+ memset(target, 0, sizeof(netdata_publish_dcstat_t));
+ return target;
+}
+
+/**
+ * eBPF dcstat release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
+{
+ aral_freez(ebpf_aral_dcstat_pid, stat);
+}
+
+/*****************************************************************
+ *
+ * VFS ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF VFS Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_vfs_aral_init()
+{
+ ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
+}
+
+/**
+ * eBPF publish VFS get
+ *
+ * Get a netdata_publish_vfs_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_vfs_t *ebpf_vfs_get(void)
+{
+ netdata_publish_vfs_t *target = aral_mallocz(ebpf_aral_vfs_pid);
+ memset(target, 0, sizeof(netdata_publish_vfs_t));
+ return target;
+}
+
+/**
+ * eBPF VFS release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_vfs_release(netdata_publish_vfs_t *stat)
+{
+ aral_freez(ebpf_aral_vfs_pid, stat);
+}
+
+/*****************************************************************
+ *
+ * FD ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF file descriptor Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_fd_aral_init()
+{
+ ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
+}
+
+/**
+ * eBPF publish file descriptor get
+ *
+ * Get a netdata_fd_stat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_fd_stat_t *ebpf_fd_stat_get(void)
+{
+ netdata_fd_stat_t *target = aral_mallocz(ebpf_aral_fd_pid);
+ memset(target, 0, sizeof(netdata_fd_stat_t));
+ return target;
+}
+
+/**
+ * eBPF file descriptor release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_fd_release(netdata_fd_stat_t *stat)
+{
+ aral_freez(ebpf_aral_fd_pid, stat);
+}
+
+/*****************************************************************
+ *
+ * SHM ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF shared memory Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_shm_aral_init()
+{
+ ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
+}
+
+/**
+ * eBPF shared memory get
+ *
+ * Get a netdata_publish_shm_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_shm_t *ebpf_shm_stat_get(void)
+{
+ netdata_publish_shm_t *target = aral_mallocz(ebpf_aral_shm_pid);
+ memset(target, 0, sizeof(netdata_publish_shm_t));
+ return target;
+}
+
+/**
+ * eBPF shared memory release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_shm_release(netdata_publish_shm_t *stat)
+{
+ aral_freez(ebpf_aral_shm_pid, stat);
+}
+
+// ----------------------------------------------------------------------------
// internal flags
// handled in code (automatically set)
@@ -49,7 +387,7 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid)
*
* @return
*/
-size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids)
+size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct ebpf_pid_on_target *pids)
{
size_t count = 0;
while (pids) {
@@ -120,19 +458,19 @@ int am_i_running_as_root()
*
* @return it returns the number of structures that was reset.
*/
-size_t zero_all_targets(struct target *root)
+size_t zero_all_targets(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
size_t count = 0;
for (w = root; w; w = w->next) {
count++;
if (unlikely(w->root_pid)) {
- struct pid_on_target *pid_on_target = w->root_pid;
+ struct ebpf_pid_on_target *pid_on_target = w->root_pid;
while (pid_on_target) {
- struct pid_on_target *pid_on_target_to_free = pid_on_target;
+ struct ebpf_pid_on_target *pid_on_target_to_free = pid_on_target;
pid_on_target = pid_on_target->next;
freez(pid_on_target_to_free);
}
@@ -149,9 +487,9 @@ size_t zero_all_targets(struct target *root)
*
* @param agrt the pointer to be cleaned.
*/
-void clean_apps_groups_target(struct target *agrt)
+void clean_apps_groups_target(struct ebpf_target *agrt)
{
- struct target *current_target;
+ struct ebpf_target *current_target;
while (agrt) {
current_target = agrt;
agrt = current_target->target;
@@ -170,7 +508,7 @@ void clean_apps_groups_target(struct target *agrt)
*
* @return It returns the target on success and NULL otherwise
*/
-struct target *get_apps_groups_target(struct target **agrt, const char *id, struct target *target, const char *name)
+struct ebpf_target *get_apps_groups_target(struct ebpf_target **agrt, const char *id, struct ebpf_target *target, const char *name)
{
int tdebug = 0, thidden = target ? target->hidden : 0, ends_with = 0;
const char *nid = id;
@@ -188,9 +526,9 @@ struct target *get_apps_groups_target(struct target **agrt, const char *id, stru
uint32_t hash = simple_hash(id);
// find if it already exists
- struct target *w, *last = *agrt;
+ struct ebpf_target *w, *last = *agrt;
for (w = *agrt; w; w = w->next) {
- if (w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ if (w->idhash == hash && strncmp(nid, w->id, EBPF_MAX_NAME) == 0)
return w;
last = w;
@@ -215,18 +553,18 @@ struct target *get_apps_groups_target(struct target **agrt, const char *id, stru
"Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id,
target->id, target->target->id);
- w = callocz(1, sizeof(struct target));
- strncpyz(w->id, nid, MAX_NAME);
+ w = callocz(1, sizeof(struct ebpf_target));
+ strncpyz(w->id, nid, EBPF_MAX_NAME);
w->idhash = simple_hash(w->id);
if (unlikely(!target))
// copy the name
- strncpyz(w->name, name, MAX_NAME);
+ strncpyz(w->name, name, EBPF_MAX_NAME);
else
// copy the id
- strncpyz(w->name, nid, MAX_NAME);
+ strncpyz(w->name, nid, EBPF_MAX_NAME);
- strncpyz(w->compare, nid, MAX_COMPARE_NAME);
+ strncpyz(w->compare, nid, EBPF_MAX_COMPARE_NAME);
size_t len = strlen(w->compare);
if (w->compare[len - 1] == '*') {
w->compare[len - 1] = '\0';
@@ -267,7 +605,7 @@ struct target *get_apps_groups_target(struct target **agrt, const char *id, stru
*
* @return It returns 0 on success and -1 otherwise
*/
-int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const char *path, const char *file)
+int ebpf_read_apps_groups_conf(struct ebpf_target **agdt, struct ebpf_target **agrt, const char *path, const char *file)
{
char filename[FILENAME_MAX + 1];
@@ -297,7 +635,7 @@ int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const
continue;
// find a possibly existing target
- struct target *w = NULL;
+ struct ebpf_target *w = NULL;
// loop through all words, skipping the first one (the name)
for (word = 0; word < words; word++) {
@@ -312,7 +650,7 @@ int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const
continue;
// add this target
- struct target *n = get_apps_groups_target(agrt, s, w, name);
+ struct ebpf_target *n = get_apps_groups_target(agrt, s, w, name);
if (!n) {
error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
continue;
@@ -331,7 +669,7 @@ int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const
if (!*agdt)
fatal("Cannot create default target");
- struct target *ptr = *agdt;
+ struct ebpf_target *ptr = *agdt;
if (ptr->target)
*agdt = ptr->target;
@@ -345,17 +683,15 @@ int ebpf_read_apps_groups_conf(struct target **agdt, struct target **agrt, const
// ----------------------------------------------------------------------------
// string lengths
-#define MAX_COMPARE_NAME 100
-#define MAX_NAME 100
#define MAX_CMDLINE 16384
-struct pid_stat **all_pids = NULL; // to avoid allocations, we pre-allocate the
+struct ebpf_pid_stat **ebpf_all_pids = NULL; // to avoid allocations, we pre-allocate the
// the entire pid space.
-struct pid_stat *root_of_pids = NULL; // global list of all processes running
+struct ebpf_pid_stat *ebpf_root_of_pids = NULL; // global list of all processes running
-size_t all_pids_count = 0; // the number of processes running
+size_t ebpf_all_pids_count = 0; // the number of processes running
-struct target
+struct ebpf_target
*apps_groups_default_target = NULL, // the default target
*apps_groups_root_target = NULL, // apps_groups.conf defined
*users_root_target = NULL, // users
@@ -416,7 +752,7 @@ static inline void debug_log_dummy(void)
*
* @return It returns the status value.
*/
-static inline int managed_log(struct pid_stat *p, uint32_t log, int status)
+static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status)
{
if (unlikely(!status)) {
// error("command failed log %u, errno %d", log, errno);
@@ -476,23 +812,23 @@ static inline int managed_log(struct pid_stat *p, uint32_t log, int status)
*
* @return It returns the pid entry structure
*/
-static inline struct pid_stat *get_pid_entry(pid_t pid)
+static inline struct ebpf_pid_stat *get_pid_entry(pid_t pid)
{
- if (unlikely(all_pids[pid]))
- return all_pids[pid];
+ if (unlikely(ebpf_all_pids[pid]))
+ return ebpf_all_pids[pid];
- struct pid_stat *p = callocz(1, sizeof(struct pid_stat));
+ struct ebpf_pid_stat *p = ebpf_pid_stat_get();
- if (likely(root_of_pids))
- root_of_pids->prev = p;
+ if (likely(ebpf_root_of_pids))
+ ebpf_root_of_pids->prev = p;
- p->next = root_of_pids;
- root_of_pids = p;
+ p->next = ebpf_root_of_pids;
+ ebpf_root_of_pids = p;
p->pid = pid;
- all_pids[pid] = p;
- all_pids_count++;
+ ebpf_all_pids[pid] = p;
+ ebpf_all_pids_count++;
return p;
}
@@ -502,14 +838,14 @@ static inline struct pid_stat *get_pid_entry(pid_t pid)
*
* @param p the pid_stat structure to assign for a target.
*/
-static inline void assign_target_to_pid(struct pid_stat *p)
+static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
{
targets_assignment_counter++;
uint32_t hash = simple_hash(p->comm);
size_t pclen = strlen(p->comm);
- struct target *w;
+ struct ebpf_target *w;
for (w = apps_groups_root_target; w; w = w->next) {
// if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
@@ -543,11 +879,11 @@ static inline void assign_target_to_pid(struct pid_stat *p)
/**
* Read cmd line from /proc/PID/cmdline
*
- * @param p the pid_stat_structure.
+ * @param p the ebpf_pid_stat_structure.
*
* @return It returns 1 on success and 0 otherwise.
*/
-static inline int read_proc_pid_cmdline(struct pid_stat *p)
+static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p)
{
static char cmdline[MAX_CMDLINE + 1];
@@ -596,7 +932,7 @@ cleanup:
* @param p the pid stat structure to store the data.
* @param ptr an useless argument.
*/
-static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr)
+static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr)
{
UNUSED(ptr);
@@ -640,7 +976,7 @@ static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr)
debug_log("\tJust added %d (%s)", p->pid, comm);
}
- strncpyz(p->comm, comm, MAX_COMPARE_NAME);
+ strncpyz(p->comm, comm, EBPF_MAX_COMPARE_NAME);
// /proc/<pid>/cmdline
if (likely(proc_pid_cmdline_is_needed))
@@ -673,7 +1009,7 @@ static inline int collect_data_for_pid(pid_t pid, void *ptr)
return 0;
}
- struct pid_stat *p = get_pid_entry(pid);
+ struct ebpf_pid_stat *p = get_pid_entry(pid);
if (unlikely(!p || p->read))
return 0;
p->read = 1;
@@ -701,11 +1037,11 @@ static inline int collect_data_for_pid(pid_t pid, void *ptr)
*/
static inline void link_all_processes_to_their_parents(void)
{
- struct pid_stat *p, *pp;
+ struct ebpf_pid_stat *p, *pp;
// link all children to their parents
// and update children count on parents
- for (p = root_of_pids; p; p = p->next) {
+ for (p = ebpf_root_of_pids; p; p = p->next) {
// for each process found
p->sortlist = 0;
@@ -716,7 +1052,7 @@ static inline void link_all_processes_to_their_parents(void)
continue;
}
- pp = all_pids[p->ppid];
+ pp = ebpf_all_pids[p->ppid];
if (likely(pp)) {
p->parent = pp;
pp->children_count++;
@@ -738,7 +1074,7 @@ static inline void link_all_processes_to_their_parents(void)
*/
static void apply_apps_groups_targets_inheritance(void)
{
- struct pid_stat *p = NULL;
+ struct ebpf_pid_stat *p = NULL;
// children that do not have a target
// inherit their target from their parent
@@ -747,7 +1083,7 @@ static void apply_apps_groups_targets_inheritance(void)
if (unlikely(debug_enabled))
loops++;
found = 0;
- for (p = root_of_pids; p; p = p->next) {
+ for (p = ebpf_root_of_pids; p; p = p->next) {
// if this process does not have a target
// and it has a parent
// and its parent has a target
@@ -773,7 +1109,7 @@ static void apply_apps_groups_targets_inheritance(void)
loops++;
found = 0;
- for (p = root_of_pids; p; p = p->next) {
+ for (p = ebpf_root_of_pids; p; p = p->next) {
if (unlikely(!p->sortlist && !p->children_count))
p->sortlist = sortlist++;
@@ -809,17 +1145,17 @@ static void apply_apps_groups_targets_inheritance(void)
}
// init goes always to default target
- if (all_pids[INIT_PID])
- all_pids[INIT_PID]->target = apps_groups_default_target;
+ if (ebpf_all_pids[INIT_PID])
+ ebpf_all_pids[INIT_PID]->target = apps_groups_default_target;
// pid 0 goes always to default target
- if (all_pids[0])
- all_pids[0]->target = apps_groups_default_target;
+ if (ebpf_all_pids[0])
+ ebpf_all_pids[0]->target = apps_groups_default_target;
// give a default target on all top level processes
if (unlikely(debug_enabled))
loops++;
- for (p = root_of_pids; p; p = p->next) {
+ for (p = ebpf_root_of_pids; p; p = p->next) {
// if the process is not merged itself
// then is is a top level process
if (unlikely(!p->merged && !p->target))
@@ -830,8 +1166,8 @@ static void apply_apps_groups_targets_inheritance(void)
p->sortlist = sortlist++;
}
- if (all_pids[1])
- all_pids[1]->sortlist = sortlist++;
+ if (ebpf_all_pids[1])
+ ebpf_all_pids[1]->sortlist = sortlist++;
// give a target to all merged child processes
found = 1;
@@ -839,7 +1175,7 @@ static void apply_apps_groups_targets_inheritance(void)
if (unlikely(debug_enabled))
loops++;
found = 0;
- for (p = root_of_pids; p; p = p->next) {
+ for (p = ebpf_root_of_pids; p; p = p->next) {
if (unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
p->target = p->parent->target;
found++;
@@ -860,9 +1196,9 @@ static void apply_apps_groups_targets_inheritance(void)
*
* @param root the targets that will be updated.
*/
-static inline void post_aggregate_targets(struct target *root)
+static inline void post_aggregate_targets(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
for (w = root; w; w = w->next) {
if (w->collected_starttime) {
if (!w->starttime || w->collected_starttime < w->starttime) {
@@ -881,7 +1217,7 @@ static inline void post_aggregate_targets(struct target *root)
*/
static inline void del_pid_entry(pid_t pid)
{
- struct pid_stat *p = all_pids[pid];
+ struct ebpf_pid_stat *p = ebpf_all_pids[pid];
if (unlikely(!p)) {
error("attempted to free pid %d that is not allocated.", pid);
@@ -890,8 +1226,8 @@ static inline void del_pid_entry(pid_t pid)
debug_log("process %d %s exited, deleting it.", pid, p->comm);
- if (root_of_pids == p)
- root_of_pids = p->next;
+ if (ebpf_root_of_pids == p)
+ ebpf_root_of_pids = p->next;
if (p->next)
p->next->prev = p->prev;
@@ -903,10 +1239,10 @@ static inline void del_pid_entry(pid_t pid)
freez(p->io_filename);
freez(p->cmdline_filename);
freez(p->cmdline);
- freez(p);
+ ebpf_pid_stat_release(p);
- all_pids[pid] = NULL;
- all_pids_count--;
+ ebpf_all_pids[pid] = NULL;
+ ebpf_all_pids_count--;
}
/**
@@ -921,9 +1257,9 @@ static inline void del_pid_entry(pid_t pid)
*/
int get_pid_comm(pid_t pid, size_t n, char *dest)
{
- struct pid_stat *stat;
+ struct ebpf_pid_stat *stat;
- stat = all_pids[pid];
+ stat = ebpf_all_pids[pid];
if (unlikely(stat == NULL)) {
return -1;
}
@@ -945,19 +1281,19 @@ void cleanup_variables_from_other_threads(uint32_t pid)
{
// Clean socket structures
if (socket_bandwidth_curr) {
- freez(socket_bandwidth_curr[pid]);
+ ebpf_socket_release(socket_bandwidth_curr[pid]);
socket_bandwidth_curr[pid] = NULL;
}
// Clean cachestat structure
if (cachestat_pid) {
- freez(cachestat_pid[pid]);
+ ebpf_cachestat_release(cachestat_pid[pid]);
cachestat_pid[pid] = NULL;
}
// Clean directory cache structure
if (dcstat_pid) {
- freez(dcstat_pid[pid]);
+ ebpf_dcstat_release(dcstat_pid[pid]);
dcstat_pid[pid] = NULL;
}
@@ -969,19 +1305,19 @@ void cleanup_variables_from_other_threads(uint32_t pid)
// Clean vfs structure
if (vfs_pid) {
- freez(vfs_pid[pid]);
+ ebpf_vfs_release(vfs_pid[pid]);
vfs_pid[pid] = NULL;
}
// Clean fd structure
if (fd_pid) {
- freez(fd_pid[pid]);
+ ebpf_fd_release(fd_pid[pid]);
fd_pid[pid] = NULL;
}
// Clean shm structure
if (shm_pid) {
- freez(shm_pid[pid]);
+ ebpf_shm_release(shm_pid[pid]);
shm_pid[pid] = NULL;
}
}
@@ -991,9 +1327,9 @@ void cleanup_variables_from_other_threads(uint32_t pid)
*/
void cleanup_exited_pids()
{
- struct pid_stat *p = NULL;
+ struct ebpf_pid_stat *p = NULL;
- for (p = root_of_pids; p;) {
+ for (p = ebpf_root_of_pids; p;) {
if (!p->updated && (!p->keep || p->keeploops > 0)) {
if (unlikely(debug_enabled && (p->keep || p->keeploops)))
debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
@@ -1002,12 +1338,9 @@ void cleanup_exited_pids()
p = p->next;
// Clean process structure
- freez(global_process_stats[r]);
+ ebpf_process_stat_release(global_process_stats[r]);
global_process_stats[r] = NULL;
- freez(current_apps_data[r]);
- current_apps_data[r] = NULL;
-
cleanup_variables_from_other_threads(r);
del_pid_entry(r);
@@ -1060,7 +1393,7 @@ static inline void read_proc_filesystem()
* @param p the pid with information to update
* @param o never used
*/
-static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o)
+static inline void aggregate_pid_on_target(struct ebpf_target *w, struct ebpf_pid_stat *p, struct ebpf_target *o)
{
UNUSED(o);
@@ -1075,7 +1408,7 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p,
}
w->processes++;
- struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
+ struct ebpf_pid_on_target *pid_on_target = mallocz(sizeof(struct ebpf_pid_on_target));
pid_on_target->pid = p->pid;
pid_on_target->next = w->root_pid;
w->root_pid = pid_on_target;
@@ -1091,10 +1424,10 @@ static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p,
*/
void collect_data_for_all_processes(int tbl_pid_stats_fd)
{
- if (unlikely(!all_pids))
+ if (unlikely(!ebpf_all_pids))
return;
- struct pid_stat *pids = root_of_pids; // global list of all processes running
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids; // global list of all processes running
while (pids) {
if (pids->updated_twice) {
pids->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
@@ -1113,24 +1446,21 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd)
read_proc_filesystem();
uint32_t key;
- pids = root_of_pids; // global list of all processes running
+ pids = ebpf_root_of_pids; // global list of all processes running
// while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
while (pids) {
key = pids->pid;
ebpf_process_stat_t *w = global_process_stats[key];
if (!w) {
- w = callocz(1, sizeof(ebpf_process_stat_t));
+ w = ebpf_process_stat_get();
global_process_stats[key] = w;
}
if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, w)) {
// Clean Process structures
- freez(w);
+ ebpf_process_stat_release(w);
global_process_stats[key] = NULL;
- freez(current_apps_data[key]);
- current_apps_data[key] = NULL;
-
cleanup_variables_from_other_threads(key);
pids = pids->next;
@@ -1148,7 +1478,7 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd)
// this has to be done, before the cleanup
// // concentrate everything on the targets
- for (pids = root_of_pids; pids; pids = pids->next)
+ for (pids = ebpf_root_of_pids; pids; pids = pids->next)
aggregate_pid_on_target(pids->target, pids, NULL);
post_aggregate_targets(apps_groups_root_target);
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index 0bea9122f..d33442af5 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -3,7 +3,6 @@
#ifndef NETDATA_EBPF_APPS_H
#define NETDATA_EBPF_APPS_H 1
-#include "libnetdata/threads/threads.h"
#include "libnetdata/locks/locks.h"
#include "libnetdata/avl/avl.h"
#include "libnetdata/clocks/clocks.h"
@@ -34,92 +33,21 @@
#include "ebpf_swap.h"
#include "ebpf_vfs.h"
-#define MAX_COMPARE_NAME 100
-#define MAX_NAME 100
-
-// ----------------------------------------------------------------------------
-// process_pid_stat
-//
-// Fields read from the kernel ring for a specific PID
-//
-typedef struct process_pid_stat {
- uint64_t pid_tgid; // Unique identifier
- uint32_t pid; // process id
-
- // Count number of calls done for specific function
- uint32_t open_call;
- uint32_t write_call;
- uint32_t writev_call;
- uint32_t read_call;
- uint32_t readv_call;
- uint32_t unlink_call;
- uint32_t exit_call;
- uint32_t release_call;
- uint32_t fork_call;
- uint32_t clone_call;
- uint32_t close_call;
-
- // Count number of bytes written or read
- uint64_t write_bytes;
- uint64_t writev_bytes;
- uint64_t readv_bytes;
- uint64_t read_bytes;
-
- // Count number of errors for the specified function
- uint32_t open_err;
- uint32_t write_err;
- uint32_t writev_err;
- uint32_t read_err;
- uint32_t readv_err;
- uint32_t unlink_err;
- uint32_t fork_err;
- uint32_t clone_err;
- uint32_t close_err;
-} process_pid_stat_t;
-
-// ----------------------------------------------------------------------------
-// socket_bandwidth
-//
-// Fields read from the kernel ring for a specific PID
-//
-typedef struct socket_bandwidth {
- uint64_t first;
- uint64_t ct;
- uint64_t sent;
- uint64_t received;
- unsigned char removed;
-} socket_bandwidth_t;
+#define EBPF_MAX_COMPARE_NAME 100
+#define EBPF_MAX_NAME 100
// ----------------------------------------------------------------------------
// pid_stat
//
-// structure to store data for each process running
-// see: man proc for the description of the fields
-
-struct pid_fd {
- int fd;
-
-#ifndef __FreeBSD__
- ino_t inode;
- char *filename;
- uint32_t link_hash;
- size_t cache_iterations_counter;
- size_t cache_iterations_reset;
-#endif
-};
-
-struct target {
- char compare[MAX_COMPARE_NAME + 1];
+struct ebpf_target {
+ char compare[EBPF_MAX_COMPARE_NAME + 1];
uint32_t comparehash;
size_t comparelen;
- char id[MAX_NAME + 1];
+ char id[EBPF_MAX_NAME + 1];
uint32_t idhash;
- char name[MAX_NAME + 1];
-
- uid_t uid;
- gid_t gid;
+ char name[EBPF_MAX_NAME + 1];
// Changes made to simplify integration between apps and eBPF.
netdata_publish_cachestat_t cachestat;
@@ -129,58 +57,9 @@ struct target {
netdata_fd_stat_t fd;
netdata_publish_shm_t shm;
- /* These variables are not necessary for eBPF collector
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
- kernel_uint_t num_threads;
- // kernel_uint_t rss;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
-
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- // kernel_uint_t io_read_calls;
- // kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- // kernel_uint_t io_cancelled_write_bytes;
-
- int *target_fds;
- int target_fds_size;
-
- kernel_uint_t openfiles;
- kernel_uint_t openpipes;
- kernel_uint_t opensockets;
- kernel_uint_t openinotifies;
- kernel_uint_t openeventfds;
- kernel_uint_t opentimerfds;
- kernel_uint_t opensignalfds;
- kernel_uint_t openeventpolls;
- kernel_uint_t openother;
- */
-
kernel_uint_t starttime;
kernel_uint_t collected_starttime;
- /*
- kernel_uint_t uptime_min;
- kernel_uint_t uptime_sum;
- kernel_uint_t uptime_max;
- */
-
unsigned int processes; // how many processes have been merged to this
int exposed; // if set, we have sent this to netdata
int hidden; // if set, we set the hidden flag on the dimension
@@ -189,20 +68,20 @@ struct target {
int starts_with; // if set, the compare string matches only the
// beginning of the command
- struct pid_on_target *root_pid; // list of aggregated pids for target debugging
+ struct ebpf_pid_on_target *root_pid; // list of aggregated pids for target debugging
- struct target *target; // the one that will be reported to netdata
- struct target *next;
+ struct ebpf_target *target; // the one that will be reported to netdata
+ struct ebpf_target *next;
};
-extern struct target *apps_groups_default_target;
-extern struct target *apps_groups_root_target;
-extern struct target *users_root_target;
-extern struct target *groups_root_target;
+extern struct ebpf_target *apps_groups_default_target;
+extern struct ebpf_target *apps_groups_root_target;
+extern struct ebpf_target *users_root_target;
+extern struct ebpf_target *groups_root_target;
-struct pid_stat {
+struct ebpf_pid_stat {
int32_t pid;
- char comm[MAX_COMPARE_NAME + 1];
+ char comm[EBPF_MAX_COMPARE_NAME + 1];
char *cmdline;
uint32_t log_thrown;
@@ -210,96 +89,6 @@ struct pid_stat {
// char state;
int32_t ppid;
- // int32_t pgrp;
- // int32_t session;
- // int32_t tty_nr;
- // int32_t tpgid;
- // uint64_t flags;
-
- /*
- // these are raw values collected
- kernel_uint_t minflt_raw;
- kernel_uint_t cminflt_raw;
- kernel_uint_t majflt_raw;
- kernel_uint_t cmajflt_raw;
- kernel_uint_t utime_raw;
- kernel_uint_t stime_raw;
- kernel_uint_t gtime_raw; // guest_time
- kernel_uint_t cutime_raw;
- kernel_uint_t cstime_raw;
- kernel_uint_t cgtime_raw; // cguest_time
-
- // these are rates
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
-
- // int64_t priority;
- // int64_t nice;
- int32_t num_threads;
- // int64_t itrealvalue;
- kernel_uint_t collected_starttime;
- // kernel_uint_t vsize;
- // kernel_uint_t rss;
- // kernel_uint_t rsslim;
- // kernel_uint_t starcode;
- // kernel_uint_t endcode;
- // kernel_uint_t startstack;
- // kernel_uint_t kstkesp;
- // kernel_uint_t kstkeip;
- // uint64_t signal;
- // uint64_t blocked;
- // uint64_t sigignore;
- // uint64_t sigcatch;
- // uint64_t wchan;
- // uint64_t nswap;
- // uint64_t cnswap;
- // int32_t exit_signal;
- // int32_t processor;
- // uint32_t rt_priority;
- // uint32_t policy;
- // kernel_uint_t delayacct_blkio_ticks;
-
- uid_t uid;
- gid_t gid;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
-#ifndef __FreeBSD__
- ARL_BASE *status_arl;
-#endif
-
- kernel_uint_t io_logical_bytes_read_raw;
- kernel_uint_t io_logical_bytes_written_raw;
- // kernel_uint_t io_read_calls_raw;
- // kernel_uint_t io_write_calls_raw;
- kernel_uint_t io_storage_bytes_read_raw;
- kernel_uint_t io_storage_bytes_written_raw;
- // kernel_uint_t io_cancelled_write_bytes_raw;
-
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- // kernel_uint_t io_read_calls;
- // kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- // kernel_uint_t io_cancelled_write_bytes;
- */
-
- struct pid_fd *fds; // array of fds it uses
- size_t fds_size; // the size of the fds array
-
int children_count; // number of processes directly referencing this
unsigned char keep : 1; // 1 when we need to keep this process in memory even after it exited
int keeploops; // increases by 1 every time keep is 1 and updated 0
@@ -312,28 +101,21 @@ struct pid_stat {
// each process gets a unique number
- struct target *target; // app_groups.conf targets
- struct target *user_target; // uid based targets
- struct target *group_target; // gid based targets
+ struct ebpf_target *target; // app_groups.conf targets
+ struct ebpf_target *user_target; // uid based targets
+ struct ebpf_target *group_target; // gid based targets
usec_t stat_collected_usec;
usec_t last_stat_collected_usec;
- usec_t io_collected_usec;
- usec_t last_io_collected_usec;
-
- kernel_uint_t uptime;
-
- char *fds_dirname; // the full directory name in /proc/PID/fd
-
char *stat_filename;
char *status_filename;
char *io_filename;
char *cmdline_filename;
- struct pid_stat *parent;
- struct pid_stat *prev;
- struct pid_stat *next;
+ struct ebpf_pid_stat *parent;
+ struct ebpf_pid_stat *prev;
+ struct ebpf_pid_stat *next;
};
// ----------------------------------------------------------------------------
@@ -344,15 +126,15 @@ struct pid_stat {
//
// - Each entry in /etc/apps_groups.conf creates a target.
// - Each user and group used by a process in the system, creates a target.
-struct pid_on_target {
+struct ebpf_pid_on_target {
int32_t pid;
- struct pid_on_target *next;
+ struct ebpf_pid_on_target *next;
};
// ----------------------------------------------------------------------------
// Structures used to read information from kernel ring
typedef struct ebpf_process_stat {
- uint64_t pid_tgid;
+ uint64_t pid_tgid; // This cannot be removed, because it is used inside kernel ring.
uint32_t pid;
//Counter
@@ -406,16 +188,16 @@ static inline void debug_log_int(const char *fmt, ...)
// ----------------------------------------------------------------------------
// Exported variables and functions
//
-extern struct pid_stat **all_pids;
+extern struct ebpf_pid_stat **ebpf_all_pids;
-int ebpf_read_apps_groups_conf(struct target **apps_groups_default_target,
- struct target **apps_groups_root_target,
- const char *path,
- const char *file);
+int ebpf_read_apps_groups_conf(struct ebpf_target **apps_groups_default_target,
+ struct ebpf_target **apps_groups_root_target,
+ const char *path,
+ const char *file);
-void clean_apps_groups_target(struct target *apps_groups_root_target);
+void clean_apps_groups_target(struct ebpf_target *apps_groups_root_target);
-size_t zero_all_targets(struct target *root);
+size_t zero_all_targets(struct ebpf_target *root);
int am_i_running_as_root();
@@ -427,15 +209,74 @@ int get_pid_comm(pid_t pid, size_t n, char *dest);
size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep,
int fd,
- struct pid_on_target *pids);
+ struct ebpf_pid_on_target *pids);
-size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct pid_on_target *pids);
+size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct ebpf_pid_on_target *pids);
void collect_data_for_all_processes(int tbl_pid_stats_fd);
extern ebpf_process_stat_t **global_process_stats;
-extern ebpf_process_publish_apps_t **current_apps_data;
extern netdata_publish_cachestat_t **cachestat_pid;
extern netdata_publish_dcstat_t **dcstat_pid;
+extern netdata_publish_swap_t **swap_pid;
+extern netdata_publish_vfs_t **vfs_pid;
+extern netdata_fd_stat_t **fd_pid;
+extern netdata_publish_shm_t **shm_pid;
+
+// The default value is at least 32 times smaller than the maximum number of PIDs allowed on the system;
+// this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/libnetdata/aral).
+#ifndef NETDATA_EBPF_ALLOC_MAX_PID
+# define NETDATA_EBPF_ALLOC_MAX_PID 1024
+#endif
+#define NETDATA_EBPF_ALLOC_MIN_ELEMENTS 256
+
+// ARAL Section
+extern void ebpf_aral_init(void);
+
+extern ebpf_process_stat_t *ebpf_process_stat_get(void);
+extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
+
+extern ARAL *ebpf_aral_socket_pid;
+void ebpf_socket_aral_init();
+ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void);
+void ebpf_socket_release(ebpf_socket_publish_apps_t *stat);
+
+extern ARAL *ebpf_aral_cachestat_pid;
+void ebpf_cachestat_aral_init();
+netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void);
+void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
+
+extern ARAL *ebpf_aral_dcstat_pid;
+void ebpf_dcstat_aral_init();
+netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void);
+void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
+
+extern ARAL *ebpf_aral_vfs_pid;
+void ebpf_vfs_aral_init();
+netdata_publish_vfs_t *ebpf_vfs_get(void);
+void ebpf_vfs_release(netdata_publish_vfs_t *stat);
+
+extern ARAL *ebpf_aral_fd_pid;
+void ebpf_fd_aral_init();
+netdata_fd_stat_t *ebpf_fd_stat_get(void);
+void ebpf_fd_release(netdata_fd_stat_t *stat);
+
+extern ARAL *ebpf_aral_shm_pid;
+void ebpf_shm_aral_init();
+netdata_publish_shm_t *ebpf_shm_stat_get(void);
+void ebpf_shm_release(netdata_publish_shm_t *stat);
+
+// ARAL Section end
+
+// Threads integrated with apps
+extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
+// Threads integrated with apps
+
+#include "libnetdata/threads/threads.h"
+
+// ARAL variables
+extern ARAL *ebpf_aral_apps_pid_stat;
+extern ARAL *ebpf_aral_process_stat;
+#define NETDATA_EBPF_PROC_ARAL_NAME "ebpf_proc_stat"
#endif /* NETDATA_EBPF_APPS_H */
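
The ARAL declarations above replace ad-hoc callocz()/freez() pairs for the per-PID structures. A minimal usage sketch of the get/update/release cycle, written against the symbols declared in this header (the function name and the exit check are hypothetical, for illustration only):

// Hypothetical sketch: how a per-PID entry is expected to flow through the ARAL helpers.
static void example_update_pid(uint32_t pid)
{
    ebpf_process_stat_t *ps = global_process_stats[pid];
    if (!ps) {
        ps = ebpf_process_stat_get();        // entry served from ebpf_aral_process_stat
        global_process_stats[pid] = ps;
    }

    // ... copy the counters read from the kernel ring into *ps ...

    if (0 /* the process exited and is no longer tracked */) {
        global_process_stats[pid] = NULL;
        ebpf_process_stat_release(ps);       // memory returns to the ARAL pool, not to the OS
    }
}

Recycling entries through the pool is what allows NETDATA_EBPF_ALLOC_MAX_PID to stay far below pid_max: slots are reused as processes come and go instead of being allocated once per possible PID.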
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index b21cc6103..b2b006dd3 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -3,8 +3,6 @@
#include "ebpf.h"
#include "ebpf_cachestat.h"
-netdata_publish_cachestat_t **cachestat_pid;
-
static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
"miss" };
static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
@@ -46,10 +44,6 @@ static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] ={ "account_page_
"__set_page_dirty", "__folio_mark_dirty" };
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/cachestat.skel.h" // BTF code
-
-static struct cachestat_bpf *bpf_obj = NULL;
-
/**
* Disable probe
*
@@ -333,20 +327,14 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
static void ebpf_cachestat_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);
-
freez(cachestat_vector);
freez(cachestat_values);
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- cachestat_bpf__destroy(bpf_obj);
-#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -502,7 +490,7 @@ static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *pu
{
netdata_publish_cachestat_t *curr = cachestat_pid[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_cachestat_t));
+ curr = ebpf_publish_cachestat_get();
cachestat_pid[current_pid] = curr;
cachestat_save_pid_values(curr, publish);
@@ -521,7 +509,7 @@ static void read_apps_table()
{
netdata_cachestat_pid_t *cv = cachestat_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs;
while (pids) {
@@ -589,7 +577,7 @@ static void ebpf_update_cachestat_cgroup()
*/
void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_RATIO_CHART,
"Hit ratio",
EBPF_COMMON_DIMENSION_PERCENTAGE,
@@ -694,7 +682,7 @@ static void cachestat_send_global(netdata_publish_cachestat_t *publish)
* @param publish output structure.
 * @param root    structure with the list of PIDs
*/
-void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on_target *root)
+void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct ebpf_pid_on_target *root)
{
memcpy(&publish->prev, &publish->current,sizeof(publish->current));
memset(&publish->current, 0, sizeof(publish->current));
@@ -720,9 +708,9 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on
*
* @param root the target list.
*/
-void ebpf_cache_send_apps_data(struct target *root)
+void ebpf_cache_send_apps_data(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
collected_number value;
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART);
@@ -1092,6 +1080,11 @@ static void cachestat_collector(ebpf_module_t *em)
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_cache_send_apps_data(apps_groups_root_target);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_cachestat_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_cachestat_pid, em);
+#endif
+
if (cgroups)
ebpf_cachestat_send_cgroup_data(update_every);
@@ -1167,10 +1160,11 @@ static void ebpf_create_memory_charts(ebpf_module_t *em)
*/
static void ebpf_cachestat_allocate_global_vectors(int apps)
{
- if (apps)
+ if (apps) {
cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
-
- cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
+ ebpf_cachestat_aral_init();
+ cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
+ }
cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
@@ -1232,11 +1226,11 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = cachestat_bpf__open();
- if (!bpf_obj)
+ cachestat_bpf_obj = cachestat_bpf__open();
+ if (!cachestat_bpf_obj)
ret = -1;
else
- ret = ebpf_cachestat_load_and_attach(bpf_obj, em);
+ ret = ebpf_cachestat_load_and_attach(cachestat_bpf_obj, em);
}
#endif
@@ -1265,7 +1259,6 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
if (ebpf_cachestat_set_internal_value()) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endcachestat;
}
@@ -1273,7 +1266,6 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_cachestat_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endcachestat;
}
@@ -1289,7 +1281,13 @@ void *ebpf_cachestat_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
ebpf_create_memory_charts(em);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_cachestat_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_CACHESTAT_ARAL_NAME, em);
+#endif
+
pthread_mutex_unlock(&lock);
cachestat_collector(em);
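
ebpf_cachestat_sum_pids() above keeps two generations of counters (prev and current) so the collector can later compute per-interval values for the apps charts. A compact, stand-alone sketch of that rotate-and-aggregate step, with hypothetical structure and field names:

#include <stdint.h>
#include <string.h>

typedef struct counters { uint64_t add_to_page_cache; uint64_t mark_page_accessed; } counters_t;
typedef struct publish  { counters_t current, prev; } publish_t;

// Rotate last cycle's totals into prev, then rebuild current from every PID of the target.
static void rotate_and_sum(publish_t *pub, const counters_t *per_pid, int pids)
{
    memcpy(&pub->prev, &pub->current, sizeof(pub->current));   // keep the previous cycle
    memset(&pub->current, 0, sizeof(pub->current));            // start the new cycle from zero

    for (int i = 0; i < pids; i++) {
        pub->current.add_to_page_cache  += per_pid[i].add_to_page_cache;
        pub->current.mark_page_accessed += per_pid[i].mark_page_accessed;
    }
    // chart values are then derived from the difference between current and prev
}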
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 15b06511e..2c1f171c7 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -33,6 +33,9 @@
#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "services.cachestat_hits"
#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "services.cachestat_misses"
+// ARAL Name
+#define NETDATA_EBPF_CACHESTAT_ARAL_NAME "ebpf_cachestat"
+
// variables
enum cachestat_counters {
NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
@@ -82,6 +85,7 @@ typedef struct netdata_publish_cachestat {
} netdata_publish_cachestat_t;
void *ebpf_cachestat_thread(void *ptr);
+void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
extern struct config cachestat_config;
extern netdata_ebpf_targets_t cachestat_targets[];
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c
index 42c045368..6d7c555bd 100644
--- a/collectors/ebpf.plugin/ebpf_cgroup.c
+++ b/collectors/ebpf.plugin/ebpf_cgroup.c
@@ -6,6 +6,7 @@
#include "ebpf_cgroup.h"
ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
+static void *ebpf_mapped_memory = NULL;
int send_cgroup_chart = 0;
// --------------------------------------------------------------------------------------------------------------------
@@ -19,7 +20,7 @@ int send_cgroup_chart = 0;
* @param fd file descriptor returned after shm_open was called.
* @param length length of the shared memory
*
- * @return It returns a pointer to the region mapped.
+ * @return It returns a pointer to the region mapped on success and MAP_FAILED otherwise.
*/
static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
{
@@ -37,6 +38,16 @@ static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
}
/**
+ * Unmap Shared Memory
+ *
+ * Unmap shared memory used to integrate eBPF and cgroup plugin
+ */
+void ebpf_unmap_cgroup_shared_memory()
+{
+ munmap(ebpf_mapped_memory, shm_ebpf_cgroup.header->body_length);
+}
+
+/**
* Map cgroup shared memory
*
* Map cgroup shared memory from cgroup to plugin
@@ -56,40 +67,47 @@ void ebpf_map_cgroup_shared_memory()
limit_try++;
next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;
- shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
if (shm_fd_ebpf_cgroup < 0) {
- if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
- error("Shared memory was not initialized, integration between processes won't happen.");
+ shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
+ if (shm_fd_ebpf_cgroup < 0) {
+ if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
+ error("Shared memory was not initialized, integration between processes won't happen.");
- return;
+ return;
+ }
}
// Map only header
- shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
- sizeof(netdata_ebpf_cgroup_shm_header_t));
- if (!shm_ebpf_cgroup.header) {
- limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+ void *mapped = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
+ sizeof(netdata_ebpf_cgroup_shm_header_t));
+ if (unlikely(mapped == MAP_FAILED)) {
return;
}
+ netdata_ebpf_cgroup_shm_header_t *header = mapped;
- size_t length = shm_ebpf_cgroup.header->body_length;
+ size_t length = header->body_length;
- munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));
+ munmap(header, sizeof(netdata_ebpf_cgroup_shm_header_t));
- shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
- if (!shm_ebpf_cgroup.header) {
- limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+ if (length <= ((sizeof(netdata_ebpf_cgroup_shm_header_t) + sizeof(netdata_ebpf_cgroup_shm_body_t)))) {
return;
}
- shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
- sizeof(netdata_ebpf_cgroup_shm_header_t));
+
+ ebpf_mapped_memory = (void *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
+ if (unlikely(ebpf_mapped_memory == MAP_FAILED)) {
+ return;
+ }
+ shm_ebpf_cgroup.header = ebpf_mapped_memory;
+ shm_ebpf_cgroup.body = ebpf_mapped_memory + sizeof(netdata_ebpf_cgroup_shm_header_t);
shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);
if (shm_sem_ebpf_cgroup == SEM_FAILED) {
error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
- munmap(shm_ebpf_cgroup.header, length);
+ limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+ munmap(ebpf_mapped_memory, length);
shm_ebpf_cgroup.header = NULL;
+ shm_ebpf_cgroup.body = NULL;
close(shm_fd_ebpf_cgroup);
shm_fd_ebpf_cgroup = -1;
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
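
The rewritten mapping logic above follows a two-stage pattern: map only the header to learn the total size published by cgroup.plugin, unmap it, then remap the whole region (header plus body) in one block. A self-contained sketch of that pattern, with hypothetical names and a simplified header that only carries the body length:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

typedef struct shm_header { uint32_t body_length; } shm_header_t;

// Sketch only: map the header, read the advertised size, then map the full region.
static void *map_shared_region(int fd, size_t *out_len)
{
    shm_header_t *hdr = mmap(NULL, sizeof(*hdr), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (hdr == MAP_FAILED)
        return NULL;

    size_t total = hdr->body_length;
    munmap(hdr, sizeof(*hdr));

    if (total <= sizeof(shm_header_t))
        return NULL;                         // nothing published yet, try again later

    void *all = mmap(NULL, total, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (all == MAP_FAILED)
        return NULL;

    *out_len = total;
    return all;                              // header at offset 0, body right after it
}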
@@ -258,32 +276,38 @@ void ebpf_reset_updated_var()
void ebpf_parse_cgroup_shm_data()
{
static int previous = 0;
- if (shm_ebpf_cgroup.header) {
- sem_wait(shm_sem_ebpf_cgroup);
- int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
+ if (!shm_ebpf_cgroup.header || shm_sem_ebpf_cgroup == SEM_FAILED)
+ return;
- pthread_mutex_lock(&mutex_cgroup_shm);
+ sem_wait(shm_sem_ebpf_cgroup);
+ int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
+ if (end <= 0) {
+ sem_post(shm_sem_ebpf_cgroup);
+ return;
+ }
- ebpf_remove_cgroup_target_update_list();
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_remove_cgroup_target_update_list();
- ebpf_reset_updated_var();
+ ebpf_reset_updated_var();
- for (i = 0; i < end; i++) {
- netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
- if (ptr->enabled) {
- ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
- ebpf_update_pid_link_list(ect, ptr->path);
- }
+ for (i = 0; i < end; i++) {
+ netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
+ if (ptr->enabled) {
+ ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
+ ebpf_update_pid_link_list(ect, ptr->path);
}
- send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
- previous = shm_ebpf_cgroup.header->cgroup_root_count;
+ }
+ send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
+ previous = shm_ebpf_cgroup.header->cgroup_root_count;
+ pthread_mutex_unlock(&mutex_cgroup_shm);
#ifdef NETDATA_DEV_MODE
- error("Updating cgroup %d (Previous: %d, Current: %d)", send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
+ info("Updating cgroup %d (Previous: %d, Current: %d)",
+ send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
#endif
- pthread_mutex_unlock(&mutex_cgroup_shm);
- sem_post(shm_sem_ebpf_cgroup);
- }
+ sem_post(shm_sem_ebpf_cgroup);
}
// --------------------------------------------------------------------------------------------------------------------
@@ -315,3 +339,54 @@ void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *fam
fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
}
}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Cgroup main thread
+
+/**
+ * CGROUP exit
+ *
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_cgroup_exit(void *ptr)
+{
+ UNUSED(ptr);
+}
+
+/**
+ * Cgroup integration
+ *
+ * Thread responsible for calling the functions that sync data between the plugins.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_cgroup_integration(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);
+
+ usec_t step = USEC_PER_SEC;
+ int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ //Plugin will be killed when it receives a signal
+ while (!ebpf_exit_plugin) {
+ (void)heartbeat_next(&hb, step);
+
+ // We use a small heartbeat time to wake up the thread,
+ // but we should not update the shared memory data that frequently
+ if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
+ counter = 0;
+ if (!shm_ebpf_cgroup.header)
+ ebpf_map_cgroup_shared_memory();
+ else
+ ebpf_parse_cgroup_shm_data();
+ }
+ }
+
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
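
The loop above wakes every second but only touches the shared memory once every NETDATA_EBPF_CGROUP_UPDATE ticks, so the thread stays responsive to shutdown without hammering the cgroup data. A stand-alone sketch of that wake-often/work-rarely cadence (all names here are hypothetical stand-ins; the plugin itself uses heartbeat_next() rather than sleep()):

#include <stdbool.h>
#include <unistd.h>

#define UPDATE_EVERY_TICKS 10                  // stand-in for NETDATA_EBPF_CGROUP_UPDATE

static void integration_loop(volatile bool *exit_flag)
{
    int counter = UPDATE_EVERY_TICKS - 1;      // forces real work on the very first tick
    while (!*exit_flag) {
        sleep(1);                              // cheap wake-up, checks the exit flag often
        if (++counter < UPDATE_EVERY_TICKS)
            continue;                          // skip the expensive part most of the time
        counter = 0;

        // map the shared memory if it is not mapped yet, otherwise parse it
    }
}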
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h
index 19da7fca9..6620ea10a 100644
--- a/collectors/ebpf.plugin/ebpf_cgroup.h
+++ b/collectors/ebpf.plugin/ebpf_cgroup.h
@@ -64,6 +64,8 @@ void ebpf_map_cgroup_shared_memory();
void ebpf_parse_cgroup_shm_data();
void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
char *algorithm, char *context, char *module, int update_every);
+void *ebpf_cgroup_integration(void *ptr);
+void ebpf_unmap_cgroup_shared_memory();
extern int send_cgroup_chart;
#endif /* NETDATA_EBPF_CGROUP_H */
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 75e83214a..5f1400601 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -8,7 +8,6 @@ static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_
static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END];
netdata_dcstat_pid_t *dcstat_vector = NULL;
-netdata_publish_dcstat_t **dcstat_pid = NULL;
static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
static netdata_idx_t *dcstat_values = NULL;
@@ -45,10 +44,6 @@ netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOA
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/dc.skel.h" // BTF code
-
-static struct dc_bpf *bpf_obj = NULL;
-
/**
* Disable probe
*
@@ -294,23 +289,16 @@ void ebpf_dcstat_clean_names()
static void ebpf_dcstat_free(ebpf_module_t *em )
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
freez(dcstat_vector);
freez(dcstat_values);
- ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated);
-
ebpf_dcstat_clean_names();
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- dc_bpf__destroy(bpf_obj);
-#endif
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -342,7 +330,7 @@ static void ebpf_dcstat_exit(void *ptr)
*/
void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_DC_HIT_CHART,
"Percentage of files inside directory cache",
EBPF_COMMON_DIMENSION_PERCENTAGE,
@@ -432,7 +420,7 @@ static void dcstat_fill_pid(uint32_t current_pid, netdata_dcstat_pid_t *publish)
{
netdata_publish_dcstat_t *curr = dcstat_pid[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_dcstat_t));
+ curr = ebpf_publish_dcstat_get();
dcstat_pid[current_pid] = curr;
}
@@ -448,7 +436,7 @@ static void read_apps_table()
{
netdata_dcstat_pid_t *cv = dcstat_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
while (pids) {
@@ -540,7 +528,7 @@ static void ebpf_dc_read_global_table()
* @param publish output structure.
 * @param root    structure with the list of PIDs
*/
-void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_target *root)
+void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct ebpf_pid_on_target *root)
{
memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
netdata_dcstat_pid_t *dst = &publish->curr;
@@ -563,9 +551,9 @@ void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_targe
*
* @param root the target list.
*/
-void ebpf_dcache_send_apps_data(struct target *root)
+void ebpf_dcache_send_apps_data(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
collected_number value;
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_DC_HIT_CHART);
@@ -1009,6 +997,11 @@ static void dcstat_collector(ebpf_module_t *em)
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_dcache_send_apps_data(apps_groups_root_target);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_dcstat_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_dcstat_pid, em);
+#endif
+
if (cgroups)
ebpf_dc_send_cgroup_data(update_every);
@@ -1064,10 +1057,12 @@ static void ebpf_create_filesystem_charts(int update_every)
*/
static void ebpf_dcstat_allocate_global_vectors(int apps)
{
- if (apps)
+ if (apps) {
+ ebpf_dcstat_aral_init();
dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *));
+ dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
+ }
- dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
dcstat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(dcstat_counter_aggregated_data, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_syscall_stat_t));
@@ -1099,11 +1094,11 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = dc_bpf__open();
- if (!bpf_obj)
+ dc_bpf_obj = dc_bpf__open();
+ if (!dc_bpf_obj)
ret = -1;
else
- ret = ebpf_dc_load_and_attach(bpf_obj, em);
+ ret = ebpf_dc_load_and_attach(dc_bpf_obj, em);
}
#endif
@@ -1137,7 +1132,6 @@ void *ebpf_dcstat_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_dcstat_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddcstat;
}
@@ -1155,6 +1149,12 @@ void *ebpf_dcstat_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_filesystem_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_dcstat_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_DCSTAT_ARAL_NAME, em);
+#endif
+
pthread_mutex_unlock(&lock);
dcstat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
index 201fc8a02..5c9eed4d6 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -28,6 +28,9 @@
#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache"
#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found"
+// ARAL name
+#define NETDATA_EBPF_DCSTAT_ARAL_NAME "ebpf_dcstat"
+
enum directory_cache_indexes {
NETDATA_DCSTAT_IDX_RATIO,
NETDATA_DCSTAT_IDX_REFERENCE,
@@ -75,6 +78,7 @@ typedef struct netdata_publish_dcstat {
void *ebpf_dcstat_thread(void *ptr);
void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr);
+void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
extern struct config dcstat_config;
extern netdata_ebpf_targets_t dc_targets[];
extern ebpf_local_maps_t dcstat_maps[];
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
index 5e7e2599d..e1a579441 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -429,7 +429,7 @@ static void ebpf_cleanup_disk_list()
static void ebpf_disk_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_disk_disable_tracepoints();
@@ -444,7 +444,7 @@ static void ebpf_disk_free(ebpf_module_t *em)
ebpf_cleanup_disk_list();
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -761,25 +761,21 @@ void *ebpf_disk_thread(void *ptr)
em->maps = disk_maps;
if (ebpf_disk_enable_tracepoints()) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}
avl_init_lock(&disk_tree, ebpf_compare_disks);
if (read_local_disks()) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}
if (pthread_mutex_init(&plot_mutex, NULL)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot initialize local mutex");
goto enddisk;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto enddisk;
}
@@ -792,6 +788,7 @@ void *ebpf_disk_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, disk_maps);
pthread_mutex_unlock(&lock);
disk_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 79537066c..96da91b0a 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -36,17 +36,12 @@ static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
static netdata_idx_t *fd_values = NULL;
netdata_fd_stat_t *fd_vector = NULL;
-netdata_fd_stat_t **fd_pid = NULL;
netdata_ebpf_targets_t fd_targets[] = { {.name = "open", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "close", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/fd.skel.h" // BTF code
-
-static struct fd_bpf *bpf_obj = NULL;
-
/**
* Disable probe
*
@@ -364,20 +359,14 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
static void ebpf_fd_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- ebpf_cleanup_publish_syscall(fd_publish_aggregated);
freez(fd_values);
freez(fd_vector);
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- fd_bpf__destroy(bpf_obj);
-#endif
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -479,7 +468,7 @@ static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
{
netdata_fd_stat_t *curr = fd_pid[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(netdata_fd_stat_t));
+ curr = ebpf_fd_stat_get();
fd_pid[current_pid] = curr;
}
@@ -495,7 +484,7 @@ static void read_apps_table()
{
netdata_fd_stat_t *fv = fd_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
while (pids) {
@@ -560,7 +549,7 @@ static void ebpf_update_fd_cgroup()
* @param fd the output
* @param root list of pids
*/
-static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root)
+static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *root)
{
uint32_t open_call = 0;
uint32_t close_call = 0;
@@ -593,9 +582,9 @@ static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root)
* @param em the structure with thread information
* @param root the target list.
*/
-void ebpf_fd_send_apps_data(ebpf_module_t *em, struct target *root)
+void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
ebpf_fd_sum_pids(&w->fd, w->root_pid);
@@ -685,7 +674,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400,
ebpf_create_global_dimension,
&fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
@@ -695,7 +684,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
ebpf_create_global_dimension,
&fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
1, em->update_every,
- NETDATA_EBPF_MODULE_NAME_SWAP);
+ NETDATA_EBPF_MODULE_NAME_FD);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
@@ -704,7 +693,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402,
ebpf_create_global_dimension,
&fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
@@ -714,7 +703,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
ebpf_create_global_dimension,
&fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
1, em->update_every,
- NETDATA_EBPF_MODULE_NAME_SWAP);
+ NETDATA_EBPF_MODULE_NAME_FD);
}
}
@@ -797,28 +786,28 @@ static void ebpf_create_systemd_fd_charts(ebpf_module_t *em)
EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED, 20061,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED, 20062,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
}
ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED, 20063,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED, 20064,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
}
}
@@ -939,6 +928,11 @@ static void fd_collector(ebpf_module_t *em)
if (apps)
read_apps_table();
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_fd_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_fd_pid, em);
+#endif
+
if (cgroups)
ebpf_update_fd_cgroup();
@@ -972,7 +966,7 @@ static void fd_collector(ebpf_module_t *em)
*/
void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
"Number of open files",
EBPF_COMMON_DIMENSION_CALL,
@@ -980,7 +974,7 @@ void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20061,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
@@ -990,7 +984,7 @@ void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20062,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
}
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED,
@@ -1000,7 +994,7 @@ void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20063,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
if (em->mode < MODE_ENTRY) {
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
@@ -1010,7 +1004,7 @@ void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20064,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
}
em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
@@ -1070,10 +1064,11 @@ static void ebpf_create_fd_global_charts(ebpf_module_t *em)
*/
static void ebpf_fd_allocate_global_vectors(int apps)
{
- if (apps)
+ if (apps) {
+ ebpf_fd_aral_init();
fd_pid = callocz((size_t)pid_max, sizeof(netdata_fd_stat_t *));
-
- fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
+ fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
+ }
fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
}
@@ -1092,17 +1087,16 @@ static int ebpf_fd_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = fd_bpf__open();
- if (!bpf_obj)
+ fd_bpf_obj = fd_bpf__open();
+ if (!fd_bpf_obj)
ret = -1;
else
- ret = ebpf_fd_load_and_attach(bpf_obj, em);
+ ret = ebpf_fd_load_and_attach(fd_bpf_obj, em);
}
#endif
@@ -1132,7 +1126,6 @@ void *ebpf_fd_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_fd_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endfd;
}
@@ -1148,6 +1141,12 @@ void *ebpf_fd_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_fd_global_charts(em);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_fd_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_FD_ARAL_NAME, em);
+#endif
+
pthread_mutex_unlock(&lock);
fd_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h
index e6545d79c..85dfd36ea 100644
--- a/collectors/ebpf.plugin/ebpf_fd.h
+++ b/collectors/ebpf.plugin/ebpf_fd.h
@@ -33,6 +33,9 @@
#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "services.fd_close"
#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error"
+// ARAL name
+#define NETDATA_EBPF_FD_ARAL_NAME "ebpf_fd"
+
typedef struct netdata_fd_stat {
uint32_t open_call; // Open syscalls (open and openat)
uint32_t close_call; // Close syscall (close)
@@ -80,8 +83,8 @@ enum fd_close_syscall {
void *ebpf_fd_thread(void *ptr);
void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr);
+void ebpf_fd_release(netdata_fd_stat_t *stat);
extern struct config fd_config;
-extern netdata_fd_stat_t **fd_pid;
extern netdata_ebpf_targets_t fd_targets[];
#endif /* NETDATA_EBPF_FD_H */
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
index 5250ed8af..f8b28195c 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -92,7 +92,7 @@ static void ebpf_obsolete_fs_charts(int update_every)
static void ebpf_create_fs_charts(int update_every)
{
static int order = NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS;
- char chart_name[64], title[256], family[64];
+ char chart_name[64], title[256], family[64], ctx[64];
int i;
uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED|NETDATA_FILESYSTEM_REMOVE_CHARTS;
for (i = 0; localfs[i].filesystem; i++) {
@@ -110,7 +110,7 @@ static void ebpf_create_fs_charts(int update_every)
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
title,
EBPF_COMMON_DIMENSION_CALL, family,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ "filesystem.read_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
order++;
@@ -123,7 +123,7 @@ static void ebpf_create_fs_charts(int update_every)
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
title,
EBPF_COMMON_DIMENSION_CALL, family,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ "filesystem.write_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
order++;
@@ -136,7 +136,7 @@ static void ebpf_create_fs_charts(int update_every)
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
title,
EBPF_COMMON_DIMENSION_CALL, family,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ "filesystem.open_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
order++;
@@ -144,12 +144,13 @@ static void ebpf_create_fs_charts(int update_every)
char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? "attribute" : "sync";
snprintfz(title, 255, "%s latency for each %s request.", efp->filesystem, type);
snprintfz(chart_name, 63, "%s_%s_latency", efp->filesystem, type);
+ snprintfz(ctx, 63, "filesystem.%s_latency", type);
efp->hadditional.name = strdupz(chart_name);
efp->hadditional.title = strdupz(title);
efp->hadditional.order = order;
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, title,
EBPF_COMMON_DIMENSION_CALL, family,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ ctx, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
order++;
@@ -182,6 +183,9 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
return -1;
}
efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
+ pthread_mutex_lock(&lock);
+ ebpf_update_kernel_memory(&plugin_statistics, &fs_maps[i], EBPF_ACTION_STAT_ADD);
+ pthread_mutex_unlock(&lock);
// Needed for filesystems like btrfs
if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function)) {
@@ -326,18 +330,16 @@ void ebpf_filesystem_cleanup_ebpf_data()
static void ebpf_filesystem_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- ebpf_cleanup_publish_syscall(filesystem_publish_aggregated);
-
ebpf_filesystem_cleanup_ebpf_data();
if (dimensions)
ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
freez(filesystem_hash_values);
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -567,7 +569,6 @@ void *ebpf_filesystem_thread(void *ptr)
if (em->optional)
info("Netdata cannot monitor the filesystems used on this host.");
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endfilesystem;
}
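
The new ctx argument above gives every latency chart a stable context of the form filesystem.<operation>_latency. A tiny stand-alone illustration of the string it produces (plain snprintf used here instead of netdata's snprintfz):

#include <stdio.h>

int main(void) {
    char ctx[64];
    const char *type = "attribute";                 // or "sync", depending on NETDATA_FILESYSTEM_ATTR_CHARTS
    snprintf(ctx, sizeof(ctx), "filesystem.%s_latency", type);
    printf("%s\n", ctx);                            // prints: filesystem.attribute_latency
    return 0;
}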
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
index 20c4b9d05..b4d49dc00 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -129,11 +129,54 @@ static hardirq_static_val_t hardirq_static_vals[] = {
// thread will write to netdata agent.
static avl_tree_lock hardirq_pub;
-// tmp store for dynamic hard IRQ values we get from a per-CPU eBPF map.
-static hardirq_ebpf_val_t *hardirq_ebpf_vals = NULL;
+/*****************************************************************
+ *
+ * ARAL SECTION
+ *
+ *****************************************************************/
+
+// ARAL vectors used to speed up processing
+ARAL *ebpf_aral_hardirq = NULL;
+
+/**
+ * eBPF hardirq Aral init
+ *
+ * Initialize the array allocator that will be used when integration with apps is enabled.
+ */
+static inline void ebpf_hardirq_aral_init()
+{
+ ebpf_aral_hardirq = ebpf_allocate_pid_aral(NETDATA_EBPF_HARDIRQ_ARAL_NAME, sizeof(hardirq_val_t));
+}
-// tmp store for static hard IRQ values we get from a per-CPU eBPF map.
-static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
+/**
+ * eBPF hardirq get
+ *
+ * Get a hardirq_val_t entry to be used with a specific IRQ.
+ *
+ * @return it returns the address on success.
+ */
+hardirq_val_t *ebpf_hardirq_get(void)
+{
+ hardirq_val_t *target = aral_mallocz(ebpf_aral_hardirq);
+ memset(target, 0, sizeof(hardirq_val_t));
+ return target;
+}
+
+/**
+ * eBPF hardirq release
+ *
+ * @param stat the target to be released after use.
+ */
+void ebpf_hardirq_release(hardirq_val_t *stat)
+{
+ aral_freez(ebpf_aral_hardirq, stat);
+}
+
+/*****************************************************************
+ *
+ * EXIT FUNCTIONS
+ *
+ *****************************************************************/
/**
* Hardirq Free
@@ -144,18 +187,11 @@ static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
*/
static void ebpf_hardirq_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
}
- freez(hardirq_ebpf_vals);
- freez(hardirq_ebpf_static_vals);
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -200,8 +236,84 @@ static int hardirq_val_cmp(void *a, void *b)
}
}
-static void hardirq_read_latency_map(int mapfd)
+/**
+ * Parse interrupts
+ *
+ * Parse /proc/interrupts to get names used in metrics
+ *
+ * @param irq_name vector to store data.
+ * @param irq irq value
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static int hardirq_parse_interrupts(char *irq_name, int irq)
{
+ static procfile *ff = NULL;
+ static int cpus = -1;
+ if(unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts");
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ }
+ if(unlikely(!ff))
+ return -1;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return -1; // cannot read it now; we will retry on the next call
+
+ size_t words = procfile_linewords(ff, 0);
+ if(unlikely(cpus == -1)) {
+ uint32_t w;
+ cpus = 0;
+ for(w = 0; w < words ; w++) {
+ if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
+ cpus++;
+ }
+ }
+
+ size_t lines = procfile_lines(ff), l;
+ if(unlikely(!lines)) {
+ collector_error("Cannot read /proc/interrupts, zero lines reported.");
+ return -1;
+ }
+
+ for(l = 1; l < lines ;l++) {
+ words = procfile_linewords(ff, l);
+ if(unlikely(!words)) continue;
+ const char *id = procfile_lineword(ff, l, 0);
+ if (!isdigit(id[0]))
+ continue;
+
+ int cmp = str2i(id);
+ if (cmp != irq)
+ continue;
+
+ if(unlikely((uint32_t)(cpus + 2) < words)) {
+ const char *name = procfile_lineword(ff, l, words - 1);
+ // On some motherboards IRQ can have the same name, so we append IRQ id to differentiate.
+ snprintfz(irq_name, NETDATA_HARDIRQ_NAME_LEN - 1, "%d_%s", irq, name);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Read Latency MAP
+ *
+ * Read data from kernel ring to user ring.
+ *
+ * @param mapfd hash map id.
+ *
+ * @return it returns 0 on success and -1 otherwise
+ */
+static int hardirq_read_latency_map(int mapfd)
+{
+ static hardirq_ebpf_static_val_t *hardirq_ebpf_vals = NULL;
+ if (!hardirq_ebpf_vals)
+ hardirq_ebpf_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
+
hardirq_ebpf_key_t key = {};
hardirq_ebpf_key_t next_key = {};
hardirq_val_t search_v = {};
@@ -234,7 +346,7 @@ static void hardirq_read_latency_map(int mapfd)
if (unlikely(v == NULL)) {
// latency/name can only be added reliably at a later time.
// when they're added, only then will we AVL insert.
- v = callocz(1, sizeof(hardirq_val_t));
+ v = ebpf_hardirq_get();
v->irq = key.irq;
v->dim_exists = false;
@@ -246,22 +358,10 @@ static void hardirq_read_latency_map(int mapfd)
// 2. the name is unfortunately *not* available on all CPU maps - only
// a single map contains the name, so we must find it. we only need
// to copy it though if the IRQ is new for us.
- bool name_saved = false;
uint64_t total_latency = 0;
int i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
- for (i = 0; i < end; i++) {
+ for (i = 0; i < ebpf_nprocs; i++) {
total_latency += hardirq_ebpf_vals[i].latency/1000;
-
- // copy name for new IRQs.
- if (v_is_new && !name_saved && hardirq_ebpf_vals[i].name[0] != '\0') {
- strncpyz(
- v->name,
- hardirq_ebpf_vals[i].name,
- NETDATA_HARDIRQ_NAME_LEN
- );
- name_saved = true;
- }
}
// can now safely publish latency for existing IRQs.
@@ -269,6 +369,11 @@ static void hardirq_read_latency_map(int mapfd)
// can now safely publish new IRQ.
if (v_is_new) {
+ if (hardirq_parse_interrupts(v->name, v->irq)) {
+ ebpf_hardirq_release(v);
+ return -1;
+ }
+
avl_t *check = avl_insert_lock(&hardirq_pub, (avl_t *)v);
if (check != (avl_t *)v) {
error("Internal error, cannot insert the AVL tree.");
@@ -277,10 +382,16 @@ static void hardirq_read_latency_map(int mapfd)
key = next_key;
}
+
+ return 0;
}
static void hardirq_read_latency_static_map(int mapfd)
{
+ static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
+ if (!hardirq_ebpf_static_vals)
+ hardirq_ebpf_static_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
+
uint32_t i;
for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
uint32_t map_i = hardirq_static_vals[i].idx;
@@ -302,11 +413,17 @@ static void hardirq_read_latency_static_map(int mapfd)
/**
* Read eBPF maps for hard IRQ.
+ *
+ * @return It returns -1 when /proc/interrupts cannot be parsed and 0 on success.
*/
-static void hardirq_reader()
+static int hardirq_reader()
{
- hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd);
+ if (hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd))
+ return -1;
+
hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd);
+
+ return 0;
}
static void hardirq_create_charts(int update_every)
@@ -372,25 +489,21 @@ static inline void hardirq_write_static_dims()
/**
* Main loop for this collector.
+ *
+ * @param em the main thread structure.
*/
static void hardirq_collector(ebpf_module_t *em)
{
- hardirq_ebpf_vals = callocz(
- (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs,
- sizeof(hardirq_ebpf_val_t)
- );
- hardirq_ebpf_static_vals = callocz(
- (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs,
- sizeof(hardirq_ebpf_static_val_t)
- );
-
+ memset(&hardirq_pub, 0, sizeof(hardirq_pub));
avl_init_lock(&hardirq_pub, hardirq_val_cmp);
+ ebpf_hardirq_aral_init();
// create chart and static dims.
pthread_mutex_lock(&lock);
hardirq_create_charts(em->update_every);
hardirq_create_static_dims();
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -406,7 +519,9 @@ static void hardirq_collector(ebpf_module_t *em)
continue;
counter = 0;
- hardirq_reader();
+ if (hardirq_reader())
+ break;
+
pthread_mutex_lock(&lock);
// write dims now for all hitherto discovered IRQs.
@@ -437,13 +552,11 @@ void *ebpf_hardirq_thread(void *ptr)
em->maps = hardirq_maps;
if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endhardirq;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endhardirq;
}
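
With the IRQ name no longer carried in the per-CPU eBPF map, hardirq_parse_interrupts() above builds the dimension name from /proc/interrupts: it keeps the last word of the matching row and prefixes it with the IRQ number. A small stand-alone illustration of that naming convention (the row values are invented; plain snprintf is used in place of snprintfz):

#include <stdio.h>

int main(void) {
    // For a /proc/interrupts row ending in "rtc0" for IRQ 8 ...
    char irq_name[32];
    int irq = 8;
    const char *last_word = "rtc0";              // last field of the matching row
    snprintf(irq_name, sizeof(irq_name), "%d_%s", irq, last_word);
    printf("%s\n", irq_name);                    // prints: 8_rtc0
    return 0;
}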
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h
index fe38b1bb1..52dea1e56 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.h
+++ b/collectors/ebpf.plugin/ebpf_hardirq.h
@@ -3,6 +3,9 @@
#ifndef NETDATA_EBPF_HARDIRQ_H
#define NETDATA_EBPF_HARDIRQ_H 1
+#include <stdint.h>
+#include "libnetdata/avl/avl.h"
+
/*****************************************************************
* copied from kernel-collectors repo, with modifications needed
* for inclusion here.
@@ -15,12 +18,6 @@ typedef struct hardirq_ebpf_key {
int irq;
} hardirq_ebpf_key_t;
-typedef struct hardirq_ebpf_val {
- uint64_t latency;
- uint64_t ts;
- char name[NETDATA_HARDIRQ_NAME_LEN];
-} hardirq_ebpf_val_t;
-
enum hardirq_ebpf_static {
HARDIRQ_EBPF_STATIC_APIC_THERMAL,
HARDIRQ_EBPF_STATIC_APIC_THRESHOLD,
@@ -46,6 +43,9 @@ typedef struct hardirq_ebpf_static_val {
* below this is eBPF plugin-specific code.
*****************************************************************/
+// ARAL Name
+#define NETDATA_EBPF_HARDIRQ_ARAL_NAME "ebpf_harddirq"
+
#define NETDATA_EBPF_MODULE_NAME_HARDIRQ "hardirq"
#define NETDATA_HARDIRQ_CONFIG_FILE "hardirq.conf"
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
index 1a5a7731e..fc794e5e5 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -46,7 +46,7 @@ static void ebpf_mdflush_free(ebpf_module_t *em)
{
freez(mdflush_ebpf_vals);
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -208,6 +208,7 @@ static void mdflush_collector(ebpf_module_t *em)
pthread_mutex_lock(&lock);
mdflush_create_charts(update_every);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -246,24 +247,19 @@ void *ebpf_mdflush_thread(void *ptr)
char *md_flush_request = ebpf_find_symbol("md_flush_request");
if (!md_flush_request) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot monitor MD devices, because md is not loaded.");
- }
- freez(md_flush_request);
-
- if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) {
goto endmdflush;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endmdflush;
}
mdflush_collector(em);
endmdflush:
+ freez(md_flush_request);
ebpf_update_disabled_plugin_stats(em);
netdata_thread_cleanup_pop(1);
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
index e06010b5b..a2a4c5530 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -18,8 +18,6 @@ struct config mount_config = { .first_section = NULL, .last_section = NULL, .mut
.index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-static netdata_idx_t *mount_values = NULL;
-
static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -27,10 +25,6 @@ netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_T
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/mount.skel.h" // BTF code
-
-static struct mount_bpf *bpf_obj = NULL;
-
/*****************************************************************
*
* BTF FUNCTIONS
@@ -228,18 +222,7 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_
static void ebpf_mount_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- freez(mount_values);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- mount_bpf__destroy(bpf_obj);
-#endif
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -269,6 +252,10 @@ static void ebpf_mount_exit(void *ptr)
*/
static void ebpf_mount_read_global_table()
{
+ static netdata_idx_t *mount_values = NULL;
+ if (!mount_values)
+ mount_values = callocz((size_t)ebpf_nprocs + 1, sizeof(netdata_idx_t));
+
uint32_t idx;
netdata_idx_t *val = mount_hash_values;
netdata_idx_t *stored = mount_values;
@@ -311,7 +298,6 @@ static void ebpf_mount_send_data()
*/
static void mount_collector(ebpf_module_t *em)
{
- mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(mount_hash_values, 0, sizeof(mount_hash_values));
heartbeat_t hb;
@@ -390,17 +376,16 @@ static int ebpf_mount_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = mount_bpf__open();
- if (!bpf_obj)
+ mount_bpf_obj = mount_bpf__open();
+ if (!mount_bpf_obj)
ret = -1;
else
- ret = ebpf_mount_load_and_attach(bpf_obj, em);
+ ret = ebpf_mount_load_and_attach(mount_bpf_obj, em);
}
#endif
@@ -430,7 +415,6 @@ void *ebpf_mount_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_mount_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endmount;
}
@@ -442,6 +426,7 @@ void *ebpf_mount_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_mount_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
mount_collector(em);
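
The mount change above moves the read buffer into ebpf_mount_read_global_table() as a lazily allocated vector sized ebpf_nprocs + 1. A minimal sketch of the per-CPU aggregation such a read performs, assuming the global table is a per-CPU map, that bpf_map_lookup_elem() fills one slot per possible CPU, and that netdata_idx_t, ebpf_nprocs and callocz() come from the plugin headers:

#include <bpf/bpf.h>   /* bpf_map_lookup_elem() */

static void mount_read_global_table_sketch(int map_fd, netdata_idx_t *out, uint32_t end)
{
    static netdata_idx_t *stored = NULL;   /* lazy per-CPU buffer, as in the patch */
    if (!stored)
        stored = callocz((size_t)ebpf_nprocs + 1, sizeof(netdata_idx_t));

    for (uint32_t idx = 0; idx < end; idx++) {
        if (!bpf_map_lookup_elem(map_fd, &idx, stored)) {
            netdata_idx_t total = 0;
            for (int i = 0; i < ebpf_nprocs; i++)   /* sum the per-CPU copies */
                total += stored[i];
            out[idx] = total;
        }
    }
}
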
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 82420d54e..856c922ec 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -47,18 +47,18 @@ static void oomkill_cleanup(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
static void oomkill_write_data(int32_t *keys, uint32_t total)
{
// for each app, see if it was OOM killed. record as 1 if so otherwise 0.
- struct target *w;
+ struct ebpf_target *w;
for (w = apps_groups_root_target; w != NULL; w = w->next) {
if (likely(w->exposed && w->processes)) {
bool was_oomkilled = false;
- struct pid_on_target *pids = w->root_pid;
+ struct ebpf_pid_on_target *pids = w->root_pid;
while (pids) {
uint32_t j;
for (j = 0; j < total; j++) {
@@ -299,27 +299,28 @@ static void oomkill_collector(ebpf_module_t *em)
int counter = update_every - 1;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
- if (!ebpf_exit_plugin || ++counter != update_every)
+ if (ebpf_exit_plugin || ++counter != update_every)
continue;
counter = 0;
- pthread_mutex_lock(&collect_data_mutex);
- pthread_mutex_lock(&lock);
uint32_t count = oomkill_read_data(keys);
- if (cgroups && count)
- ebpf_update_oomkill_cgroup(keys, count);
+ if (!count)
+ continue;
- // write everything from the ebpf map.
- if (cgroups)
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_mutex_lock(&lock);
+ if (cgroups) {
+ ebpf_update_oomkill_cgroup(keys, count);
+ // write everything from the ebpf map.
ebpf_oomkill_send_cgroup_data(update_every);
+ }
if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART);
oomkill_write_data(keys, count);
write_end_chart();
}
-
pthread_mutex_unlock(&lock);
pthread_mutex_unlock(&collect_data_mutex);
}
@@ -334,7 +335,7 @@ static void oomkill_collector(ebpf_module_t *em)
*/
void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_OOMKILL_CHART,
"OOM kills",
EBPF_COMMON_DIMENSION_KILLS,
@@ -361,37 +362,36 @@ void *ebpf_oomkill_thread(void *ptr)
em->maps = oomkill_maps;
#define NETDATA_DEFAULT_OOM_DISABLED_MSG "Disabling OOMKILL thread, because"
- if (unlikely(!all_pids || !em->apps_charts)) {
+ if (unlikely(!ebpf_all_pids || !em->apps_charts)) {
// When we are not running integration with apps, we won't fill necessary variables for this thread to run, so
// we need to disable it.
- if (em->thread->enabled)
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (em->enabled)
info("%s apps integration is completely disabled.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ goto endoomkill;
} else if (running_on_kernel < NETDATA_EBPF_KERNEL_4_14) {
- if (em->thread->enabled)
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (em->enabled)
info("%s kernel does not have necessary tracepoints.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
- }
-
- if (em->thread->enabled == NETDATA_THREAD_EBPF_STOPPED) {
goto endoomkill;
}
if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endoomkill;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endoomkill;
}
pthread_mutex_lock(&lock);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
oomkill_collector(em);
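
Across these files the per-thread flag em->thread->enabled is replaced by the module's own em->enabled, and every read or write of it happens under ebpf_exit_cleanup. A minimal sketch of that convention, assuming ebpf_module_t, the ebpf_exit_cleanup mutex and the NETDATA_THREAD_EBPF_* states are the ones declared in ebpf.h:

static void ebpf_module_mark_stopped_sketch(ebpf_module_t *em)
{
    pthread_mutex_lock(&ebpf_exit_cleanup);
    if (em->enabled == NETDATA_THREAD_EBPF_RUNNING)   /* only a running module transitions */
        em->enabled = NETDATA_THREAD_EBPF_STOPPED;
    pthread_mutex_unlock(&ebpf_exit_cleanup);
}
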
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 9a191d391..66af47857 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -42,9 +42,6 @@ static netdata_idx_t *process_hash_values = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
-ebpf_process_stat_t **global_process_stats = NULL;
-ebpf_process_publish_apps_t **current_apps_data = NULL;
-
int process_enabled = 0;
bool publish_internal_metrics = true;
@@ -56,6 +53,8 @@ struct config process_config = { .first_section = NULL,
static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"};
static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
+static char *memlock_stat = {"memory_locked"};
+static char *hash_table_stat = {"hash_table"};
/*****************************************************************
*
@@ -138,19 +137,19 @@ static void ebpf_process_send_data(ebpf_module_t *em)
* Sum values for pid
*
* @param root the structure with all available PIDs
- *
* @param offset the address that we are reading
*
* @return it returns the sum of all PIDs
*/
-long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+long long ebpf_process_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
+long long ebpf_process_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
{
long long ret = 0;
while (root) {
int32_t pid = root->pid;
- ebpf_process_publish_apps_t *w = current_apps_data[pid];
+ ebpf_process_stat_t *w = global_process_stats[pid];
if (w) {
- ret += get_value_from_structure((char *)w, offset);
+ uint32_t *value = (uint32_t *)((char *)w + offset);
+ ret += *value;
}
root = root->next;
@@ -166,13 +165,13 @@ long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t of
*/
void ebpf_process_remove_pids()
{
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
while (pids) {
uint32_t pid = pids->pid;
ebpf_process_stat_t *w = global_process_stats[pid];
if (w) {
- freez(w);
+ ebpf_process_stat_release(w);
global_process_stats[pid] = NULL;
bpf_map_delete_elem(pid_fd, &pid);
}
@@ -186,15 +185,15 @@ void ebpf_process_remove_pids()
*
* @param root the target list.
*/
-void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
+void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
{
- struct target *w;
+ struct ebpf_target *w;
collected_number value;
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_process));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_process));
write_chart_dimension(w->name, value);
}
}
@@ -203,7 +202,7 @@ void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_thread));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_thread));
write_chart_dimension(w->name, value);
}
}
@@ -212,8 +211,8 @@ void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
- call_do_exit));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
+ exit_call));
write_chart_dimension(w->name, value);
}
}
@@ -222,8 +221,8 @@ void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
- call_release_task));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
+ release_call));
write_chart_dimension(w->name, value);
}
}
@@ -233,7 +232,7 @@ void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
task_err));
write_chart_dimension(w->name, value);
}
@@ -284,38 +283,6 @@ static void read_hash_global_tables()
}
/**
- * Read the hash table and store data to allocated vectors.
- */
-static void ebpf_process_update_apps_data()
-{
- struct pid_stat *pids = root_of_pids;
- while (pids) {
- uint32_t current_pid = pids->pid;
- ebpf_process_stat_t *ps = global_process_stats[current_pid];
- if (!ps) {
- pids = pids->next;
- continue;
- }
-
- ebpf_process_publish_apps_t *cad = current_apps_data[current_pid];
- if (!cad) {
- cad = callocz(1, sizeof(ebpf_process_publish_apps_t));
- current_apps_data[current_pid] = cad;
- }
-
- //Read data
- cad->call_do_exit = ps->exit_call;
- cad->call_release_task = ps->release_call;
- cad->create_process = ps->create_process;
- cad->create_thread = ps->create_thread;
-
- cad->task_err = ps->task_err;
-
- pids = pids->next;
- }
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in
@@ -490,6 +457,56 @@ static inline void ebpf_create_statistic_load_chart(ebpf_module_t *em)
}
/**
+ * Create chart for Kernel Memory
+ *
+ * Write to standard output current values for allocated memory.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_kernel_memory(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_KERNEL_MEMORY,
+ "Memory allocated for hash tables.",
+ "bytes",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140002,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(memlock_stat,
+ memlock_stat,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
+ * Create chart Hash Table
+ *
+ * Write to standard output number of hash tables used with this software.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_hash_tables(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_HASH_TABLES_LOADED,
+ "Number of hash tables loaded.",
+ "hash tables",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140003,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(hash_table_stat,
+ hash_table_stat,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
* Update Internal Metric variable
*
* By default eBPF.plugin sends internal metrics for netdata, but user can
@@ -520,6 +537,10 @@ static void ebpf_create_statistic_charts(ebpf_module_t *em)
ebpf_create_statistic_thread_chart(em);
ebpf_create_statistic_load_chart(em);
+
+ ebpf_create_statistic_kernel_memory(em);
+
+ ebpf_create_statistic_hash_tables(em);
}
/**
@@ -532,7 +553,7 @@ static void ebpf_create_statistic_charts(ebpf_module_t *em)
*/
void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
"Process started",
EBPF_COMMON_DIMENSION_CALL,
@@ -584,58 +605,6 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
}
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param root a pointer for the targets.
- */
-static void ebpf_create_apps_charts(struct target *root)
-{
- if (unlikely(!all_pids))
- return;
-
- struct target *w;
- int newly_added = 0;
-
- for (w = root; w; w = w->next) {
- if (w->target)
- continue;
-
- if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
- struct pid_on_target *pid_on_target;
-
- fprintf(
- stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
- (w->processes == 1) ? "" : "es");
-
- for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
- }
-
- fputc('\n', stderr);
- }
-
- if (!w->exposed && w->processes) {
- newly_added++;
- w->exposed = 1;
- if (debug_enabled || w->debug_enabled)
- debug_log_int("%s just added - regenerating charts.", w->name);
- }
- }
-
- if (!newly_added)
- return;
-
- int counter;
- for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
- ebpf_module_t *current = &ebpf_modules[counter];
- if (current->enabled && current->apps_charts && current->apps_routine)
- current->apps_routine(current, root);
- }
-}
-
/*****************************************************************
*
* FUNCTIONS TO CLOSE THE THREAD
@@ -677,13 +646,13 @@ static void ebpf_process_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- ebpf_cleanup_publish_syscall(process_publish_aggregated);
freez(process_hash_values);
ebpf_process_disable_tracepoints();
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ process_pid_fd = -1;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -1010,8 +979,7 @@ void ebpf_process_update_cgroup_algorithm()
int i;
for (i = 0; i < NETDATA_KEY_PUBLISH_PROCESS_END; i++) {
netdata_publish_syscall_t *ptr = &process_publish_aggregated[i];
- freez(ptr->algorithm);
- ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
}
}
@@ -1034,6 +1002,14 @@ void ebpf_send_statistic_data()
write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY], (long long)plugin_statistics.legacy);
write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE], (long long)plugin_statistics.core);
write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY);
+ write_chart_dimension(memlock_stat, (long long)plugin_statistics.memlock_kern);
+ write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED);
+ write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables);
+ write_end_chart();
}
/**
@@ -1047,29 +1023,21 @@ static void process_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int publish_global = em->global_charts;
int cgroups = em->cgroup_charts;
+ pthread_mutex_lock(&ebpf_exit_cleanup);
int thread_enabled = em->enabled;
+ process_pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
if (cgroups)
ebpf_process_update_cgroup_algorithm();
- int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
- int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
int update_every = em->update_every;
int counter = update_every - 1;
- int update_apps_list = update_apps_every - 1;
while (!ebpf_exit_plugin) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
if (ebpf_exit_plugin)
break;
- pthread_mutex_lock(&collect_data_mutex);
- if (++update_apps_list == update_apps_every) {
- update_apps_list = 0;
- cleanup_exited_pids();
- collect_data_for_all_processes(pid_fd);
- }
- pthread_mutex_unlock(&collect_data_mutex);
-
if (++counter == update_every) {
counter = 0;
@@ -1078,12 +1046,7 @@ static void process_collector(ebpf_module_t *em)
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_create_apps_charts(apps_groups_root_target);
- if (all_pids_count > 0) {
- if (apps_enabled) {
- ebpf_process_update_apps_data();
- }
-
+ if (ebpf_all_pids_count > 0) {
if (cgroups && shm_ebpf_cgroup.header) {
ebpf_update_process_cgroup();
}
@@ -1092,7 +1055,7 @@ static void process_collector(ebpf_module_t *em)
pthread_mutex_lock(&lock);
ebpf_send_statistic_data();
- if (thread_enabled) {
+ if (thread_enabled == NETDATA_THREAD_EBPF_RUNNING) {
if (publish_global) {
ebpf_process_send_data(em);
}
@@ -1101,6 +1064,11 @@ static void process_collector(ebpf_module_t *em)
ebpf_process_send_apps_data(apps_groups_root_target, em);
}
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_process_stat)
+ ebpf_send_data_aral_chart(ebpf_aral_process_stat, em);
+#endif
+
if (cgroups && shm_ebpf_cgroup.header) {
ebpf_process_send_cgroup_data(em);
}
@@ -1133,7 +1101,6 @@ static void ebpf_process_allocate_global_vectors(size_t length)
process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
- current_apps_data = callocz((size_t)pid_max, sizeof(ebpf_process_publish_apps_t *));
}
static void change_syscalls()
@@ -1213,10 +1180,12 @@ void *ebpf_process_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = process_maps;
+ pthread_mutex_lock(&ebpf_exit_cleanup);
if (ebpf_process_enable_tracepoints()) {
- em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = CONFIG_BOOLEAN_NO;
+ em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
}
process_enabled = em->enabled;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
pthread_mutex_lock(&lock);
ebpf_process_allocate_global_vectors(NETDATA_KEY_PUBLISH_PROCESS_END);
@@ -1226,7 +1195,6 @@ void *ebpf_process_thread(void *ptr)
set_local_pointers();
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endprocess;
}
@@ -1239,11 +1207,18 @@ void *ebpf_process_thread(void *ptr)
process_aggregated_data, process_publish_aggregated, process_dimension_names, process_id_names,
algorithms, NETDATA_KEY_PUBLISH_PROCESS_END);
- if (process_enabled) {
+ if (process_enabled == NETDATA_THREAD_EBPF_RUNNING) {
ebpf_create_global_charts(em);
}
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_process_stat)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_PROC_ARAL_NAME, em);
+#endif
+
ebpf_create_statistic_charts(em);
pthread_mutex_unlock(&lock);
@@ -1251,8 +1226,10 @@ void *ebpf_process_thread(void *ptr)
process_collector(em);
endprocess:
- if (!em->enabled)
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (em->enabled == NETDATA_THREAD_EBPF_RUNNING)
ebpf_update_disabled_plugin_stats(em);
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
netdata_thread_cleanup_pop(1);
return NULL;
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 6fded16fc..5f119aea1 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -85,17 +85,6 @@ typedef enum netdata_publish_process {
NETDATA_KEY_PUBLISH_PROCESS_END
} netdata_publish_process_t;
-typedef struct ebpf_process_publish_apps {
- // Number of calls during the last read
- uint64_t call_do_exit;
- uint64_t call_release_task;
- uint64_t create_process;
- uint64_t create_thread;
-
- // Number of errors during the last read
- uint64_t task_err;
-} ebpf_process_publish_apps_t;
-
enum ebpf_process_tables {
NETDATA_PROCESS_PID_TABLE,
NETDATA_PROCESS_GLOBAL_TABLE,
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index 4057eff7f..f81c01964 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -12,8 +12,6 @@ netdata_publish_shm_t *shm_vector = NULL;
static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
static netdata_idx_t *shm_values = NULL;
-netdata_publish_shm_t **shm_pid = NULL;
-
struct config shm_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
@@ -41,10 +39,6 @@ netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TR
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/shm.skel.h"
-
-static struct shm_bpf *bpf_obj = NULL;
-
/*****************************************************************
*
* BTF FUNCTIONS
@@ -287,22 +281,11 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
*/
static void ebpf_shm_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- ebpf_cleanup_publish_syscall(shm_publish_aggregated);
-
freez(shm_vector);
freez(shm_values);
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- shm_bpf__destroy(bpf_obj);
-#endif
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -355,7 +338,7 @@ static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
{
netdata_publish_shm_t *curr = shm_pid[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_shm_t));
+ curr = ebpf_shm_stat_get( );
shm_pid[current_pid] = curr;
}
@@ -411,7 +394,7 @@ static void read_apps_table()
{
netdata_publish_shm_t *cv = shm_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t)*ebpf_nprocs;
while (pids) {
@@ -487,7 +470,7 @@ static void ebpf_shm_read_global_table()
/**
* Sum values for all targets.
*/
-static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *root)
+static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
+static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
{
while (root) {
int32_t pid = root->pid;
@@ -513,9 +496,9 @@ static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *
*
* @param root the target list.
*/
-void ebpf_shm_send_apps_data(struct target *root)
+void ebpf_shm_send_apps_data(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
ebpf_shm_sum_pids(&w->shm, w->root_pid);
@@ -873,6 +856,11 @@ static void shm_collector(ebpf_module_t *em)
ebpf_shm_send_apps_data(apps_groups_root_target);
}
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_shm_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_shm_pid, em);
+#endif
+
if (cgroups) {
ebpf_shm_send_cgroup_data(update_every);
}
@@ -895,7 +883,7 @@ static void shm_collector(ebpf_module_t *em)
*/
void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_SHMGET_CHART,
"Calls to syscall <code>shmget(2)</code>.",
EBPF_COMMON_DIMENSION_CALL,
@@ -945,10 +933,11 @@ void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_shm_allocate_global_vectors(int apps)
{
- if (apps)
+ if (apps) {
+ ebpf_shm_aral_init();
shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
-
- shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
+ shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
+ }
shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
@@ -1001,17 +990,16 @@ static int ebpf_shm_load_bpf(ebpf_module_t *em)
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->enabled = CONFIG_BOOLEAN_NO;
ret = -1;
}
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = shm_bpf__open();
- if (!bpf_obj)
+ shm_bpf_obj = shm_bpf__open();
+ if (!shm_bpf_obj)
ret = -1;
else
- ret = ebpf_shm_load_and_attach(bpf_obj, em);
+ ret = ebpf_shm_load_and_attach(shm_bpf_obj, em);
}
#endif
@@ -1041,7 +1029,6 @@ void *ebpf_shm_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_shm_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endshm;
}
@@ -1065,6 +1052,12 @@ void *ebpf_shm_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_shm_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_shm_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_SHM_ARAL_NAME, em);
+#endif
+
pthread_mutex_unlock(&lock);
shm_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h
index b06a4a5d1..f58eaa6c1 100644
--- a/collectors/ebpf.plugin/ebpf_shm.h
+++ b/collectors/ebpf.plugin/ebpf_shm.h
@@ -27,6 +27,9 @@
#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "services.shmdt"
#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "services.shmctl"
+// ARAL name
+#define NETDATA_EBPF_SHM_ARAL_NAME "ebpf_shm"
+
typedef struct netdata_publish_shm {
uint64_t get;
uint64_t at;
@@ -50,10 +53,9 @@ enum shm_counters {
NETDATA_SHM_END
};
-extern netdata_publish_shm_t **shm_pid;
-
void *ebpf_shm_thread(void *ptr);
void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr);
+void ebpf_shm_release(netdata_publish_shm_t *stat);
extern netdata_ebpf_targets_t shm_targets[];
extern struct config shm_config;
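
ebpf_shm.h now declares ebpf_shm_release() as the counterpart of the ebpf_shm_stat_get() call used in shm_fill_pid() above, replacing the old callocz()/freez() pair with ARAL allocation. A sketch of that pairing, assuming shm_pid stays the per-PID vector allocated in ebpf_shm_allocate_global_vectors() and netdata_publish_shm_t comes from this header:

static void shm_fill_pid_sketch(uint32_t pid, netdata_publish_shm_t *publish)
{
    netdata_publish_shm_t *curr = shm_pid[pid];
    if (!curr) {
        curr = ebpf_shm_stat_get();   /* element taken from the shm ARAL arena */
        shm_pid[pid] = curr;
    }
    *curr = *publish;                 /* copy the latest per-PID counters */
}

static void shm_remove_pid_sketch(uint32_t pid)
{
    netdata_publish_shm_t *curr = shm_pid[pid];
    if (curr) {
        ebpf_shm_release(curr);       /* returned to the arena instead of freez() */
        shm_pid[pid] = NULL;
    }
}
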
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index 1954be714..aebc9ca12 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -5,6 +5,9 @@
#include "ebpf.h"
#include "ebpf_socket.h"
+// ----------------------------------------------------------------------------
+// ARAL vectors used to speed up processing
+
/*****************************************************************
*
* GLOBAL VARIABLES
@@ -58,7 +61,6 @@ static netdata_idx_t *socket_hash_values = NULL;
static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR];
static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR];
-ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
static ebpf_bandwidth_t *bandwidth_vector = NULL;
pthread_mutex_t nv_mutex;
@@ -97,10 +99,6 @@ struct netdata_static_thread socket_threads = {
};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/socket.skel.h" // BTF code
-
-static struct socket_bpf *bpf_obj = NULL;
-
/**
* Disable Probe
*
@@ -454,7 +452,6 @@ static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr)
* Clean socket plot
*
* Clean the allocated data for inbound and outbound vectors.
- */
static void clean_allocated_socket_plot()
{
if (!network_viewer_opt.enabled)
@@ -476,12 +473,12 @@ static void clean_allocated_socket_plot()
}
clean_internal_socket_plot(&plot[outbound_vectors.last]);
}
+ */
/**
* Clean network ports allocated during initialization.
*
* @param ptr a pointer to the link list.
- */
static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
{
if (unlikely(!ptr))
@@ -494,6 +491,7 @@ static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
ptr = next;
}
}
+ */
/**
* Clean service names
@@ -501,7 +499,6 @@ static void clean_network_ports(ebpf_network_viewer_port_list_t *ptr)
* Clean the allocated link list that stores names.
*
* @param names the link list.
- */
static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
{
if (unlikely(!names))
@@ -514,12 +511,12 @@ static void clean_service_names(ebpf_network_viewer_dim_name_t *names)
names = next;
}
}
+ */
/**
* Clean hostnames
*
* @param hostnames the hostnames to clean
- */
static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
{
if (unlikely(!hostnames))
@@ -533,19 +530,7 @@ static void clean_hostnames(ebpf_network_viewer_hostname_list_t *hostnames)
hostnames = next;
}
}
-
-/**
- * Cleanup publish syscall
- *
- * @param nps list of structures to clean
*/
-void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps)
-{
- while (nps) {
- freez(nps->algorithm);
- nps = nps->next;
- }
-}
/**
* Clean port Structure
@@ -596,15 +581,8 @@ static void clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
*/
static void ebpf_socket_free(ebpf_module_t *em )
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- ebpf_cleanup_publish_syscall(socket_publish_aggregated);
+ /* We can have thousands of sockets to clean, so we are transferring
+ * for OS the responsibility while we do not use ARAL here
freez(socket_hash_values);
freez(bandwidth_vector);
@@ -616,25 +594,17 @@ static void ebpf_socket_free(ebpf_module_t *em )
clean_port_structure(&listen_ports);
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled = 0;
-
clean_network_ports(network_viewer_opt.included_port);
clean_network_ports(network_viewer_opt.excluded_port);
clean_service_names(network_viewer_opt.names);
clean_hostnames(network_viewer_opt.included_hostnames);
clean_hostnames(network_viewer_opt.excluded_hostnames);
+ */
pthread_mutex_destroy(&nv_mutex);
- freez(socket_threads.thread);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- socket_bpf__destroy(bpf_obj);
-#endif
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -648,8 +618,10 @@ static void ebpf_socket_free(ebpf_module_t *em )
static void ebpf_socket_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ pthread_mutex_lock(&nv_mutex);
if (socket_threads.thread)
netdata_thread_cancel(*socket_threads.thread);
+ pthread_mutex_unlock(&nv_mutex);
ebpf_socket_free(em);
}
@@ -662,8 +634,7 @@ static void ebpf_socket_exit(void *ptr)
*/
void ebpf_socket_cleanup(void *ptr)
{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- ebpf_socket_free(em);
+ UNUSED(ptr);
}
/*****************************************************************
@@ -958,7 +929,7 @@ static void ebpf_socket_send_data(ebpf_module_t *em)
*
* @return it returns the sum of all PIDs
*/
-long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t offset)
+long long ebpf_socket_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
+long long ebpf_socket_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
{
long long ret = 0;
while (root) {
@@ -980,11 +951,11 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off
* @param em the structure with thread information
* @param root the target list.
*/
-void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root)
+void ebpf_socket_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
{
UNUSED(em);
- struct target *w;
+ struct ebpf_target *w;
collected_number value;
write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4);
@@ -1217,7 +1188,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
*/
void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
int order = 20080;
ebpf_create_charts_on_apps(NETDATA_NET_APPS_CONNECTION_TCP_V4,
"Calls to tcp_v4_connection", EBPF_COMMON_DIMENSION_CONNECTIONS,
@@ -2156,10 +2127,11 @@ void *ebpf_socket_read_hash(void *ptr)
heartbeat_init(&hb);
int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd;
int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd;
- while (!ebpf_exit_plugin) {
+ // This thread is cancelled from another thread
+ for (;;) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin)
- continue;
+ break;
pthread_mutex_lock(&nv_mutex);
ebpf_read_socket_hash_table(fd_ipv4, AF_INET);
@@ -2227,7 +2199,7 @@ void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb)
{
ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(ebpf_socket_publish_apps_t));
+ curr = ebpf_socket_stat_get();
socket_bandwidth_curr[current_pid] = curr;
}
@@ -2275,7 +2247,7 @@ static void ebpf_socket_update_apps_data()
int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd;
ebpf_bandwidth_t *eb = bandwidth_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
while (pids) {
key = pids->pid;
@@ -2794,8 +2766,7 @@ void ebpf_socket_update_cgroup_algorithm()
int i;
for (i = 0; i < NETDATA_MAX_SOCKET_VECTOR; i++) {
netdata_publish_syscall_t *ptr = &socket_publish_aggregated[i];
- freez(ptr->algorithm);
- ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
}
}
@@ -2904,6 +2875,11 @@ static void socket_collector(ebpf_module_t *em)
if (socket_apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_socket_send_apps_data(em, apps_groups_root_target);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_socket_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_socket_pid, em);
+#endif
+
if (cgroups)
ebpf_socket_send_cgroup_data(update_every);
@@ -2947,10 +2923,11 @@ static void ebpf_socket_allocate_global_vectors(int apps)
memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t));
socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- if (apps)
+ if (apps) {
+ ebpf_socket_aral_init();
socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
-
- bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
+ bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
+ }
socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
if (network_viewer_opt.enabled) {
@@ -3722,7 +3699,7 @@ static void link_hostnames(char *parse)
ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t));
hostname->value = strdupz(parse);
hostname->hash = simple_hash(parse);
- hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT);
+ hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT, true);
link_hostname((!neg)?&network_viewer_opt.included_hostnames:&network_viewer_opt.excluded_hostnames,
hostname);
@@ -3888,11 +3865,11 @@ static int ebpf_socket_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = socket_bpf__open();
- if (!bpf_obj)
+ socket_bpf_obj = socket_bpf__open();
+ if (!socket_bpf_obj)
ret = -1;
else
- ret = ebpf_socket_load_and_attach(bpf_obj, em);
+ ret = ebpf_socket_load_and_attach(socket_bpf_obj, em);
}
#endif
@@ -3922,7 +3899,6 @@ void *ebpf_socket_thread(void *ptr)
parse_table_size_options(&socket_config);
if (pthread_mutex_init(&nv_mutex, NULL)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
error("Cannot initialize local mutex");
goto endsocket;
}
@@ -3945,7 +3921,6 @@ void *ebpf_socket_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_socket_load_bpf(em)) {
- em->enabled = CONFIG_BOOLEAN_NO;
pthread_mutex_unlock(&lock);
goto endsocket;
}
@@ -3964,6 +3939,12 @@ void *ebpf_socket_thread(void *ptr)
ebpf_create_global_charts(em);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_socket_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_SOCKET_ARAL_NAME, em);
+#endif
pthread_mutex_unlock(&lock);
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
index 63b1e107b..1ba20e65e 100644
--- a/collectors/ebpf.plugin/ebpf_socket.h
+++ b/collectors/ebpf.plugin/ebpf_socket.h
@@ -160,6 +160,9 @@ typedef enum ebpf_socket_idx {
#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "services.net_udp_recv"
#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "services.net_udp_send"
+// ARAL name
+#define NETDATA_EBPF_SOCKET_ARAL_NAME "ebpf_socket"
+
typedef struct ebpf_socket_publish_apps {
// Data read
uint64_t bytes_sent; // Bytes sent
@@ -364,7 +367,6 @@ void parse_network_viewer_section(struct config *cfg);
void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
void parse_service_name_section(struct config *cfg);
-extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
extern struct config socket_config;
extern netdata_ebpf_targets_t socket_targets[];
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
index 49e9c3051..33abbdf5e 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -64,7 +64,7 @@ static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
static void ebpf_softirq_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
@@ -73,7 +73,7 @@ static void ebpf_softirq_free(ebpf_module_t *em)
freez(softirq_ebpf_vals);
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -164,6 +164,7 @@ static void softirq_collector(ebpf_module_t *em)
softirq_create_charts(em->update_every);
softirq_create_dims();
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
@@ -208,13 +209,11 @@ void *ebpf_softirq_thread(void *ptr)
em->maps = softirq_maps;
if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsoftirq;
}
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsoftirq;
}
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 059efb63b..2352470a4 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -7,12 +7,10 @@ static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" };
static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END];
static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
-netdata_publish_swap_t *swap_vector = NULL;
-
static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
static netdata_idx_t *swap_values = NULL;
-netdata_publish_swap_t **swap_pid = NULL;
+netdata_publish_swap_t *swap_vector = NULL;
struct config swap_config = { .first_section = NULL,
.last_section = NULL,
@@ -39,10 +37,6 @@ netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/swap.skel.h" // BTF code
-
-static struct swap_bpf *bpf_obj = NULL;
-
/**
* Disable probe
*
@@ -224,21 +218,11 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
*/
static void ebpf_swap_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- ebpf_cleanup_publish_syscall(swap_publish_aggregated);
-
freez(swap_vector);
freez(swap_values);
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- swap_bpf__destroy(bpf_obj);
-#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -341,7 +325,7 @@ static void read_apps_table()
{
netdata_publish_swap_t *cv = swap_vector;
uint32_t key;
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
while (pids) {
@@ -410,7 +394,7 @@ static void ebpf_swap_read_global_table()
* @param swap
* @param root
*/
-static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_target *root)
+static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_target *root)
+static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_target *root)
{
uint64_t local_read = 0;
uint64_t local_write = 0;
@@ -435,9 +419,9 @@ static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_targe
*
* @param root the target list.
*/
-void ebpf_swap_send_apps_data(struct target *root)
+void ebpf_swap_send_apps_data(struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
ebpf_swap_sum_pids(&w->swap, w->root_pid);
@@ -707,7 +691,7 @@ static void swap_collector(ebpf_module_t *em)
*/
void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_READ_CHART,
"Calls to function <code>swap_readpage</code>.",
EBPF_COMMON_DIMENSION_CALL,
@@ -829,7 +813,6 @@ void *ebpf_swap_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_swap_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endswap;
}
@@ -842,6 +825,7 @@ void *ebpf_swap_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_swap_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
pthread_mutex_unlock(&lock);
swap_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h
index 79182e52e..8ca980bf0 100644
--- a/collectors/ebpf.plugin/ebpf_swap.h
+++ b/collectors/ebpf.plugin/ebpf_swap.h
@@ -42,8 +42,6 @@ enum swap_counters {
NETDATA_SWAP_END
};
-extern netdata_publish_swap_t **swap_pid;
-
void *ebpf_swap_thread(void *ptr);
void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr);
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index 7c81c1df3..f838b65af 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -204,16 +204,12 @@ void ebpf_sync_cleanup_objects()
*/
static void ebpf_sync_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
#ifdef LIBBPF_MAJOR_VERSION
ebpf_sync_cleanup_objects();
#endif
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -523,7 +519,6 @@ void *ebpf_sync_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_sync_initialize_syscall(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endsync;
}
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index b3c0ba45d..e2d87fd52 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -13,7 +13,6 @@ static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_re
static netdata_idx_t *vfs_hash_values = NULL;
static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
-netdata_publish_vfs_t **vfs_pid = NULL;
netdata_publish_vfs_t *vfs_vector = NULL;
static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
@@ -46,10 +45,6 @@ netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/vfs.skel.h" // BTF code
-
-static struct vfs_bpf *bpf_obj = NULL;
-
/**
* Disable probe
*
@@ -397,20 +392,11 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
*/
static void ebpf_vfs_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
freez(vfs_hash_values);
freez(vfs_vector);
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj)
- vfs_bpf__destroy(bpf_obj);
-#endif
-
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ em->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
@@ -540,7 +526,7 @@ static void ebpf_vfs_read_global_table()
* @param swap output structure
* @param root link list with structure to be used
*/
-static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *root)
+static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_target *root)
+static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_target *root)
{
netdata_publish_vfs_t accumulator;
memset(&accumulator, 0, sizeof(accumulator));
@@ -606,9 +592,9 @@ static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *
* @param em the structure with thread information
* @param root the target list.
*/
-void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct target *root)
+void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
{
- struct target *w;
+ struct ebpf_target *w;
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
@@ -775,7 +761,7 @@ static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
{
netdata_publish_vfs_t *curr = vfs_pid[current_pid];
if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_vfs_t));
+ curr = ebpf_vfs_get();
vfs_pid[current_pid] = curr;
}
@@ -787,7 +773,7 @@ static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
*/
static void ebpf_vfs_read_apps()
{
- struct pid_stat *pids = root_of_pids;
+ struct ebpf_pid_stat *pids = ebpf_root_of_pids;
netdata_publish_vfs_t *vv = vfs_vector;
int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
@@ -926,88 +912,88 @@ static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em)
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for <code>vfs_fsync</code>",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for <code>vfs_open</code>",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for <code>vfs_create</code>",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
}
@@ -1484,6 +1470,11 @@ static void vfs_collector(ebpf_module_t *em)
if (apps)
ebpf_vfs_read_apps();
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_vfs_pid)
+ ebpf_send_data_aral_chart(ebpf_aral_vfs_pid, em);
+#endif
+
if (cgroups)
read_update_vfs_cgroup();
@@ -1683,7 +1674,7 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
**/
void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- struct target *root = ptr;
+ struct ebpf_target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
"Files deleted",
@@ -1825,14 +1816,16 @@ void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_vfs_allocate_global_vectors(int apps)
{
+ if (apps) {
+ ebpf_vfs_aral_init();
+ vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
+ vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
+ }
+
memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
vfs_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
-
- if (apps)
- vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
}
/*****************************************************************
@@ -1860,11 +1853,11 @@ static int ebpf_vfs_load_bpf(ebpf_module_t *em)
}
#ifdef LIBBPF_MAJOR_VERSION
else {
- bpf_obj = vfs_bpf__open();
- if (!bpf_obj)
+ vfs_bpf_obj = vfs_bpf__open();
+ if (!vfs_bpf_obj)
ret = -1;
else
- ret = ebpf_vfs_load_and_attach(bpf_obj, em);
+ ret = ebpf_vfs_load_and_attach(vfs_bpf_obj, em);
}
#endif
@@ -1895,7 +1888,6 @@ void *ebpf_vfs_thread(void *ptr)
ebpf_adjust_thread_load(em, default_btf);
#endif
if (ebpf_vfs_load_bpf(em)) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
goto endvfs;
}
@@ -1910,6 +1902,12 @@ void *ebpf_vfs_thread(void *ptr)
pthread_mutex_lock(&lock);
ebpf_create_global_charts(em);
ebpf_update_stats(&plugin_statistics, em);
+ ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps);
+#ifdef NETDATA_DEV_MODE
+ if (ebpf_aral_vfs_pid)
+ ebpf_statistic_create_aral_chart(NETDATA_EBPF_VFS_ARAL_NAME, em);
+#endif
+
pthread_mutex_unlock(&lock);
vfs_collector(em);
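
Background note on the ARAL references introduced in these hunks (ebpf_vfs_aral_init, ebpf_aral_vfs_pid, ebpf_statistic_create_aral_chart, and the new ebpf_vfs_release declaration below): ARAL is Netdata's array allocator, a pool that hands out fixed-size netdata_publish_vfs_t entries instead of doing a malloc/free per PID. The following is a minimal, self-contained sketch of that general pooling idea only; it does not use Netdata's actual ARAL API, and every identifier in it is made up for illustration.

```c
/* Generic illustration of a fixed-size object pool (the idea behind ARAL),
 * NOT Netdata's ARAL API. All identifiers below are invented for this sketch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct demo_vfs_entry {
    unsigned int pid;
    unsigned long long write_call;
    struct demo_vfs_entry *next_free;   /* intrusive free list */
} demo_vfs_entry_t;

typedef struct demo_pool {
    demo_vfs_entry_t *slab;             /* one big allocation */
    demo_vfs_entry_t *free_list;
    size_t capacity;
} demo_pool_t;

static demo_pool_t *demo_pool_create(size_t capacity) {
    demo_pool_t *p = calloc(1, sizeof(*p));
    p->slab = calloc(capacity, sizeof(demo_vfs_entry_t));
    p->capacity = capacity;
    for (size_t i = 0; i < capacity; i++) {   /* chain every slot into the free list */
        p->slab[i].next_free = p->free_list;
        p->free_list = &p->slab[i];
    }
    return p;
}

static demo_vfs_entry_t *demo_pool_get(demo_pool_t *p) {
    demo_vfs_entry_t *e = p->free_list;
    if (!e) return NULL;                      /* pool exhausted */
    p->free_list = e->next_free;
    memset(e, 0, sizeof(*e));
    return e;
}

static void demo_pool_release(demo_pool_t *p, demo_vfs_entry_t *e) {
    e->next_free = p->free_list;              /* return the slot; no free() */
    p->free_list = e;
}

int main(void) {
    demo_pool_t *pool = demo_pool_create(1024);
    demo_vfs_entry_t *e = demo_pool_get(pool);
    e->pid = 42;
    e->write_call = 7;
    printf("pid %u wrote %llu times\n", e->pid, e->write_call);
    demo_pool_release(pool, e);               /* role analogous to ebpf_vfs_release() */
    free(pool->slab);
    free(pool);
    return 0;
}
```

In the patch itself the analogous pieces appear to be: the pool set up by ebpf_vfs_aral_init() when apps charts are enabled, per-PID entries drawn from ebpf_aral_vfs_pid, optional ARAL usage charts under NETDATA_DEV_MODE, and ebpf_vfs_release() to hand entries back.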
diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h
index d7fc2672f..45a1df4b1 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.h
+++ b/collectors/ebpf.plugin/ebpf_vfs.h
@@ -69,6 +69,9 @@
#define NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT "services.vfs_fsync"
#define NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT "services.vfs_fsync_error"
+// ARAL name
+#define NETDATA_EBPF_VFS_ARAL_NAME "ebpf_vfs"
+
typedef struct netdata_publish_vfs {
uint64_t pid_tgid;
uint32_t pid;
@@ -164,10 +167,9 @@ enum netdata_vfs_calls_name {
NETDATA_VFS_END_LIST
};
-extern netdata_publish_vfs_t **vfs_pid;
-
void *ebpf_vfs_thread(void *ptr);
void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr);
+void ebpf_vfs_release(netdata_publish_vfs_t *stat);
extern netdata_ebpf_targets_t vfs_targets[];
extern struct config vfs_config;
diff --git a/collectors/ebpf.plugin/metrics.csv b/collectors/ebpf.plugin/metrics.csv
new file mode 100644
index 000000000..5714c9767
--- /dev/null
+++ b/collectors/ebpf.plugin/metrics.csv
@@ -0,0 +1,197 @@
+metric,scope,dimensions,unit,description,chart_type,labels,plugin,module
+cgroup.fd_open,cgroup,open,calls/s,Number of open files,line,,ebpf.plugin,filedescriptor
+cgroup.fd_open_error,cgroup,open,calls/s,Fails to open files,line,,ebpf.plugin,filedescriptor
+cgroup.fd_closed,cgroup,close,calls/s,Files closed,line,,ebpf.plugin,filedescriptor
+cgroup.fd_close_error,cgroup,close,calls/s,Fails to close files,line,,ebpf.plugin,filedescriptor
+services.file_open,,a dimension per systemd service,calls/s,Number of open files,stacked,,ebpf.plugin,filedescriptor
+services.file_open_error,,a dimension per systemd service,calls/s,Fails to open files,stacked,,ebpf.plugin,filedescriptor
+services.file_closed,,a dimension per systemd service,calls/s,Files closed,stacked,,ebpf.plugin,filedescriptor
+services.file_close_error,,a dimension per systemd service,calls/s,Fails to close files,stacked,,ebpf.plugin,filedescriptor
+apps.file_open,,a dimension per app group,calls/s,Number of open files,stacked,,ebpf.plugin,filedescriptor
+apps.file_open_error,,a dimension per app group,calls/s,Fails to open files,stacked,,ebpf.plugin,filedescriptor
+apps.file_closed,,a dimension per app group,calls/s,Files closed,stacked,,ebpf.plugin,filedescriptor
+apps.file_close_error,,a dimension per app group,calls/s,Fails to close files,stacked,,ebpf.plugin,filedescriptor
+filesystem.file_descriptor,,"open, close",calls/s,Open and close calls,line,,ebpf.plugin,filedescriptor
+filesystem.file_error,,"open, close",calls/s,Open fails,line,,ebpf.plugin,filedescriptor
+system.process_thread,,process,calls/s,Start process,line,,ebpf.plugin,processes
+system.process_status,,"process, zombie",difference,Process not closed,line,,ebpf.plugin,processes
+system.exit,,process,calls/s,Exit process,line,,ebpf.plugin,processes
+system.task_error,,task,calls/s,Fails to create process,line,,ebpf.plugin,processes
+apps.process_create,,a dimension per app group,calls/s,Process started,stacked,,ebpf.plugin,processes
+apps.thread_create,,a dimension per app group,calls/s,Threads started,stacked,,ebpf.plugin,processes
+apps.task_exit,,a dimension per app group,calls/s,Tasks starts exit process,stacked,,ebpf.plugin,processes
+apps.task_close,,a dimension per app group,calls/s,Tasks closed,stacked,,ebpf.plugin,processes
+apps.task_error,,a dimension per app group,calls/s,Errors to create process or threads,stacked,,ebpf.plugin,processes
+cgroup.process_create,cgroup,process,calls/s,Process started,line,,ebpf.plugin,processes
+cgroup.thread_create,cgroup,thread,calls/s,Threads started,line,,ebpf.plugin,processes
+cgroup.task_exit,cgroup,exit,calls/s,Tasks starts exit process,line,,ebpf.plugin,processes
+cgroup.task_close,cgroup,process,calls/s,Tasks closed,line,,ebpf.plugin,processes
+cgroup.task_error,cgroup,process,calls/s,Errors to create process or threads,line,,ebpf.plugin,processes
+services.process_create,cgroup,a dimension per systemd service,calls/s,Process started,stacked,,ebpf.plugin,processes
+services.thread_create,cgroup,a dimension per systemd service,calls/s,Threads started,stacked,,ebpf.plugin,processes
+services.task_close,cgroup,a dimension per systemd service,calls/s,Tasks closed,stacked,,ebpf.plugin,processes
+services.task_exit,cgroup,a dimension per systemd service,calls/s,Tasks starts exit process,stacked,,ebpf.plugin,processes
+services.task_error,cgroup,a dimension per systemd service,calls/s,Errors to create process or threads,stacked,,ebpf.plugin,processes
+disk.latency_io,disk,latency,calls/s,Disk latency,stacked,,ebpf.plugin,disk
+system.hardirq_latency,,hardirq names,milliseconds,Hardware IRQ latency,stacked,,ebpf.plugin,hardirq
+apps.cachestat_ratio,,a dimension per app group,%,Hit ratio,line,,ebpf.plugin,cachestat
+apps.cachestat_dirties,,a dimension per app group,page/s,Number of dirty pages,stacked,,ebpf.plugin,cachestat
+apps.cachestat_hits,,a dimension per app group,hits/s,Number of accessed files,stacked,,ebpf.plugin,cachestat
+apps.cachestat_misses,,a dimension per app group,misses/s,Files out of page cache,stacked,,ebpf.plugin,cachestat
+services.cachestat_ratio,,a dimension per systemd service,%,Hit ratio,line,,ebpf.plugin,cachestat
+services.cachestat_dirties,,a dimension per systemd service,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
+services.cachestat_hits,,a dimension per systemd service,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
+services.cachestat_misses,,a dimension per systemd service,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
+cgroup.cachestat_ratio,cgroup,ratio,%,Hit ratio,line,,ebpf.plugin,cachestat
+cgroup.cachestat_dirties,cgroup,dirty,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
+cgroup.cachestat_hits,cgroup,hit,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
+cgroup.cachestat_misses,cgroup,miss,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
+mem.file_sync,,"fsync, fdatasync",calls/s,Monitor calls for <code>fsync(2)</code> and <code>fdatasync(2)</code>.,stacked,,ebpf.plugin,sync
+mem.meory_map,,msync,calls/s,Monitor calls for <code>msync(2)</code>.,line,,ebpf.plugin,sync
+mem.sync,,"sync, syncfs",calls/s,Monitor calls for <code>sync(2)</code> and <code>syncfs(2)</code>.,line,,ebpf.plugin,sync
+mem.file_segment,,sync_file_range,calls/s,Monitor calls for <code>sync_file_range(2)</code>.,line,,ebpf.plugin,sync
+mem.cachestat_ratio,,ratio,%,Hit ratio,line,,ebpf.plugin,cachestat
+mem.cachestat_dirties,,dirty,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
+mem.cachestat_hits,,hit,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
+mem.cachestat_misses,,miss,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
+mdstat.mdstat_flush,,disk,flushes,MD flushes,stacked,,ebpf.plugin,mdflush
+cgroup.swap_read,cgroup,read,calls/s,Calls to function <code>swap_readpage</code>.,line,,ebpf.plugin,swap
+cgroup.swap_write,cgroup,write,calls/s,Calls to function <code>swap_writepage</code>.,line,,ebpf.plugin,swap
+services.swap_read,,a dimension per systemd service,calls/s,Calls to <code>swap_readpage</code>.,stacked,,ebpf.plugin,swap
+services.swap_write,,a dimension per systemd service,calls/s,Calls to function <code>swap_writepage</code>.,stacked,,ebpf.plugin,swap
+apps.swap_read_call,,a dimension per app group,calls/s,Calls to function <code>swap_readpage</code>.,stacked,,ebpf.plugin,swap
+apps.swap_write_call,,a dimension per app group,calls/s,Calls to function <code>swap_writepage</code>.,stacked,,ebpf.plugin,swap
+system.swapcalls,,"write, read",calls/s,Calls to access swap memory,line,,ebpf.plugin,swap
+cgroup.oomkills,cgroup,cgroup name,kills,OOM kills. This chart is provided by eBPF plugin.,line,,ebpf.plugin,oomkill
+services.oomkills,,a dimension per systemd service,kills,OOM kills. This chart is provided by eBPF plugin.,line,,ebpf.plugin,oomkill
+apps.oomkills,,a dimension per app group,kills,OOM kills,stacked,,ebpf.plugin,oomkill
+ip.inbound_conn,,connection_tcp,connections/s,Inbound connections.,line,,ebpf.plugin,socket
+ip.tcp_outbound_conn,,received,connections/s,TCP outbound connections.,line,,ebpf.plugin,socket
+ip.tcp_functions,,"received, send, closed",calls/s,Calls to internal functions,line,,ebpf.plugin,socket
+ip.total_tcp_bandwidth,,"received, send",kilobits/s,TCP bandwidth,line,,ebpf.plugin,socket
+ip.tcp_error,,"received, send",calls/s,TCP errors,line,,ebpf.plugin,socket
+ip.tcp_retransmit,,retransmited,calls/s,Packets retransmitted,line,,ebpf.plugin,socket
+ip.udp_functions,,"received, send",calls/s,UDP calls,line,,ebpf.plugin,socket
+ip.total_udp_bandwidth,,"received, send",kilobits/s,UDP bandwidth,line,,ebpf.plugin,socket
+ip.udp_error,,"received, send",calls/s,UDP errors,line,,ebpf.plugin,socket
+apps.outbound_conn_v4,,a dimension per app group,connections/s,Calls to tcp_v4_connection,stacked,,ebpf.plugin,socket
+apps.outbound_conn_v6,,a dimension per app group,connections/s,Calls to tcp_v6_connection,stacked,,ebpf.plugin,socket
+apps.total_bandwidth_sent,,a dimension per app group,kilobits/s,Bytes sent,stacked,,ebpf.plugin,socket
+apps.total_bandwidth_recv,,a dimension per app group,kilobits/s,Bytes received,stacked,,ebpf.plugin,socket
+apps.bandwidth_tcp_send,,a dimension per app group,calls/s,Calls for tcp_sendmsg,stacked,,ebpf.plugin,socket
+apps.bandwidth_tcp_recv,,a dimension per app group,calls/s,Calls for tcp_cleanup_rbuf,stacked,,ebpf.plugin,socket
+apps.bandwidth_tcp_retransmit,,a dimension per app group,calls/s,Calls for tcp_retransmit,stacked,,ebpf.plugin,socket
+apps.bandwidth_udp_send,,a dimension per app group,calls/s,Calls for udp_sendmsg,stacked,,ebpf.plugin,socket
+apps.bandwidth_udp_recv,,a dimension per app group,calls/s,Calls for udp_recvmsg,stacked,,ebpf.plugin,socket
+cgroup.net_conn_ipv4,cgroup,connected_v4,connections/s,Calls to tcp_v4_connection,line,,ebpf.plugin,socket
+cgroup.net_conn_ipv6,cgroup,connected_v6,connections/s,Calls to tcp_v6_connection,line,,ebpf.plugin,socket
+cgroup.net_bytes_recv,cgroup,received,calls/s,Bytes received,line,,ebpf.plugin,socket
+cgroup.net_bytes_sent,cgroup,sent,calls/s,Bytes sent,line,,ebpf.plugin,socket
+cgroup.net_tcp_recv,cgroup,received,calls/s,Calls to tcp_cleanup_rbuf.,line,,ebpf.plugin,socket
+cgroup.net_tcp_send,cgroup,sent,calls/s,Calls to tcp_sendmsg.,line,,ebpf.plugin,socket
+cgroup.net_retransmit,cgroup,retransmitted,calls/s,Calls to tcp_retransmit.,line,,ebpf.plugin,socket
+cgroup.net_udp_send,cgroup,sent,calls/s,Calls to udp_sendmsg,line,,ebpf.plugin,socket
+cgroup.net_udp_recv,cgroup,received,calls/s,Calls to udp_recvmsg,line,,ebpf.plugin,socket
+services.net_conn_ipv4,,a dimension per systemd service,connections/s,Calls to tcp_v4_connection,stacked,,ebpf.plugin,socket
+services.net_conn_ipv6,,a dimension per systemd service,connections/s,Calls to tcp_v6_connection,stacked,,ebpf.plugin,socket
+services.net_bytes_recv,,a dimension per systemd service,kilobits/s,Bytes received,stacked,,ebpf.plugin,socket
+services.net_bytes_sent,,a dimension per systemd service,kilobits/s,Bytes sent,stacked,,ebpf.plugin,socket
+services.net_tcp_recv,,a dimension per systemd service,calls/s,Calls to tcp_cleanup_rbuf.,stacked,,ebpf.plugin,socket
+services.net_tcp_send,,a dimension per systemd service,calls/s,Calls to tcp_sendmsg.,stacked,,ebpf.plugin,socket
+services.net_tcp_retransmit,,a dimension per systemd service,calls/s,Calls to tcp_retransmit,stacked,,ebpf.plugin,socket
+services.net_udp_send,,a dimension per systemd service,calls/s,Calls to udp_sendmsg,stacked,,ebpf.plugin,socket
+services.net_udp_recv,,a dimension per systemd service,calls/s,Calls to udp_recvmsg,stacked,,ebpf.plugin,socket
+apps.dc_ratio,,a dimension per app group,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
+apps.dc_reference,,a dimension per app group,files,Count file access,stacked,,ebpf.plugin,dcstat
+apps.dc_not_cache,,a dimension per app group,files,Files not present inside directory cache,stacked,,ebpf.plugin,dcstat
+apps.dc_not_found,,a dimension per app group,files,Files not found,stacked,,ebpf.plugin,dcstat
+cgroup.dc_ratio,cgroup,ratio,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
+cgroup.dc_reference,cgroup,reference,files,Count file access,line,,ebpf.plugin,dcstat
+cgroup.dc_not_cache,cgroup,slow,files,Files not present inside directory cache,line,,ebpf.plugin,dcstat
+cgroup.dc_not_found,cgroup,miss,files,Files not found,line,,ebpf.plugin,dcstat
+services.dc_ratio,,a dimension per systemd service,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
+services.dc_reference,,a dimension per systemd service,files,Count file access,line,,ebpf.plugin,dcstat
+services.dc_not_cache,,a dimension per systemd service,files,Files not present inside directory cache,line,,ebpf.plugin,dcstat
+services.dc_not_found,,a dimension per systemd service,files,Files not found,line,,ebpf.plugin,dcstat
+filesystem.dc_hit_ratio,,ratio,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
+filesystem.dc_reference,filesystem,"reference, slow, miss",files,Variables used to calculate hit ratio.,line,,ebpf.plugin,dcstat
+filesystem.read_latency,filesystem,latency period,calls/s,ext4 latency for each read request.,stacked,,ebpf.plugin,filesystem
+filesystem.write_latency,filesystem,latency period,calls/s,ext4 latency for each write request.,stacked,,ebpf.plugin,filesystem
+filesystem.open_latency,filesystem,latency period,calls/s,ext4 latency for each open request.,stacked,,ebpf.plugin,filesystem
+filesystem.sync_latency,filesystem,latency period,calls/s,ext4 latency for each sync request.,stacked,,ebpf.plugin,filesystem
+filesystem.attributte_latency,,latency period,calls/s,nfs latency for each attribute request.,stacked,,ebpf.plugin,filesystem
+cgroup.shmget,cgroup,get,calls/s,Calls to syscall <code>shmget(2)</code>.,line,,ebpf.plugin,shm
+cgroup.shmat,cgroup,at,calls/s,Calls to syscall <code>shmat(2)</code>.,line,,ebpf.plugin,shm
+cgroup.shmdt,cgroup,dt,calls/s,Calls to syscall <code>shmdt(2)</code>.,line,,ebpf.plugin,shm
+cgroup.shmctl,cgroup,ctl,calls/s,Calls to syscall <code>shmctl(2)</code>.,line,,ebpf.plugin,shm
+services.shmget,,a dimension per systemd service,calls/s,Calls to syscall <code>shmget(2)</code>.,stacked,,ebpf.plugin,shm
+services.shmat,,a dimension per systemd service,calls/s,Calls to syscall <code>shmat(2)</code>.,stacked,,ebpf.plugin,shm
+services.shmdt,,a dimension per systemd service,calls/s,Calls to syscall <code>shmdt(2)</code>.,stacked,,ebpf.plugin,shm
+services.shmctl,,a dimension per systemd service,calls/s,Calls to syscall <code>shmctl(2)</code>.,stacked,,ebpf.plugin,shm
+apps.shmget_call,,a dimension per app group,calls/s,Calls to syscall <code>shmget(2)</code>.,stacked,,ebpf.plugin,shm
+apps.shmat_call,,a dimension per app group,calls/s,Calls to syscall <code>shmat(2)</code>.,stacked,,ebpf.plugin,shm
+apps.shmdt_call,,a dimension per app group,calls/s,Calls to syscall <code>shmdt(2)</code>.,stacked,,ebpf.plugin,shm
+apps.shmctl_call,,a dimension per app group,calls/s,Calls to syscall <code>shmctl(2)</code>.,stacked,,ebpf.plugin,shm
+system.shared_memory_calls,,"get, at, dt, ctl",calls/s,Calls to shared memory system calls,line,,ebpf.plugin,shm
+system.softirq_latency,,soft IRQs,milliseconds,Software IRQ latency,stacked,,ebpf.plugin,softirq
+mount_points.call,,"mount, umount",calls/s,Calls to mount and umount syscalls,line,,ebpf.plugin,mount
+mount_points.error,,"mount, umount",calls/s,Errors to mount and umount file systems,line,,ebpf.plugin,mount
+cgroup.vfs_unlink,cgroup,delete,calls/s,Files deleted,line,,ebpf.plugin,vfs
+cgroup.vfs_write,cgroup,write,calls/s,Write to disk,line,,ebpf.plugin,vfs
+cgroup.vfs_write_error,cgroup,write,calls/s,Fails to write,line,,ebpf.plugin,vfs
+cgroup.vfs_read,cgroup,read,calls/s,Read from disk,line,,ebpf.plugin,vfs
+cgroup.vfs_read_error,cgroup,read,calls/s,Fails to read,line,,ebpf.plugin,vfs
+cgroup.vfs_write_bytes,cgroup,write,bytes/s,Bytes written on disk,line,,ebpf.plugin,vfs
+cgroup.vfs_read_bytes,cgroup,read,bytes/s,Bytes read from disk,line,,ebpf.plugin,vfs
+cgroup.vfs_fsync,cgroup,fsync,calls/s,Calls for <code>vfs_fsync</code>,line,,ebpf.plugin,vfs
+cgroup.vfs_fsync_error,cgroup,fsync,calls/s,Sync error,line,,ebpf.plugin,vfs
+cgroup.vfs_open,cgroup,open,calls/s,Calls for <code>vfs_open</code>,line,,ebpf.plugin,vfs
+cgroup.vfs_open_error,cgroup,open,calls/s,Open error,line,,ebpf.plugin,vfs
+cgroup.vfs_create,cgroup,create,calls/s,Calls for <code>vfs_create</code>,line,,ebpf.plugin,vfs
+cgroup.vfs_create_error,cgroup,create,calls/s,Create error,line,,ebpf.plugin,vfs
+services.vfs_unlink,,a dimension per systemd service,calls/s,Files deleted,stacked,,ebpf.plugin,vfs
+services.vfs_write,,a dimension per systemd service,calls/s,Write to disk,stacked,,ebpf.plugin,vfs
+services.vfs_write_error,,a dimension per systemd service,calls/s,Fails to write,stacked,,ebpf.plugin,vfs
+services.vfs_read,,a dimension per systemd service,calls/s,Read from disk,stacked,,ebpf.plugin,vfs
+services.vfs_read_error,,a dimension per systemd service,calls/s,Fails to read,stacked,,ebpf.plugin,vfs
+services.vfs_write_bytes,,a dimension per systemd service,bytes/s,Bytes written on disk,stacked,,ebpf.plugin,vfs
+services.vfs_read_bytes,,a dimension per systemd service,bytes/s,Bytes read from disk,stacked,,ebpf.plugin,vfs
+services.vfs_fsync,,a dimension per systemd service,calls/s,Calls to <code>vfs_fsync</code>,stacked,,ebpf.plugin,vfs
+services.vfs_fsync_error,,a dimension per systemd service,calls/s,Sync error,stacked,,ebpf.plugin,vfs
+services.vfs_open,,a dimension per systemd service,calls/s,Calls to <code>vfs_open</code>,stacked,,ebpf.plugin,vfs
+services.vfs_open_error,,a dimension per systemd service,calls/s,Open error,stacked,,ebpf.plugin,vfs
+services.vfs_create,,a dimension per systemd service,calls/s,Calls to <code>vfs_create</code>,stacked,,ebpf.plugin,vfs
+services.vfs_create_error,,a dimension per systemd service,calls/s,Create error,stacked,,ebpf.plugin,vfs
+filesystem.vfs_deleted_objects,,delete,calls/s,Remove files,line,,ebpf.plugin,vfs
+filesystem.vfs_io,,"read, write",calls/s,Calls to IO,line,,ebpf.plugin,vfs
+filesystem.vfs_io_bytes,,"read, write",bytes/s,Bytes written and read,line,,ebpf.plugin,vfs
+filesystem.vfs_io_error,,"read, write",calls/s,Fails to write or read,line,,ebpf.plugin,vfs
+filesystem.vfs_fsync,,fsync,calls/s,Calls for <code>vfs_fsync</code>,line,,ebpf.plugin,vfs
+filesystem.vfs_fsync_error,,fsync,calls/s,Fails to synchronize,line,,ebpf.plugin,vfs
+filesystem.vfs_open,,open,calls/s,Calls for <code>vfs_open</code>,line,,ebpf.plugin,vfs
+filesystem.vfs_open_error,,open,calls/s,Fails to open a file,line,,ebpf.plugin,vfs
+filesystem.vfs_create,,create,calls/s,Calls for <code>vfs_create</code>,line,,ebpf.plugin,vfs
+filesystem.vfs_create_error,,create,calls/s,Fails to create a file.,line,,ebpf.plugin,vfs
+apps.file_deleted,,a dimension per app group,calls/s,Files deleted,stacked,,ebpf.plugin,vfs
+apps.vfs_write_call,,a dimension per app group,calls/s,Write to disk,stacked,,ebpf.plugin,vfs
+apps.vfs_write_error,,a dimension per app group,calls/s,Fails to write,stacked,,ebpf.plugin,vfs
+apps.vfs_read_call,,a dimension per app group,calls/s,Read from disk,stacked,,ebpf.plugin,vfs
+apps.vfs_read_error,,a dimension per app group,calls/s,Fails to read,stacked,,ebpf.plugin,vfs
+apps.vfs_write_bytes,,a dimension per app group,bytes/s,Bytes written on disk,stacked,,ebpf.plugin,vfs
+apps.vfs_read_bytes,,a dimension per app group,bytes/s,Bytes read on disk,stacked,,ebpf.plugin,vfs
+apps.vfs_fsync,,a dimension per app group,calls/s,Calls for <code>vfs_fsync</code>,stacked,,ebpf.plugin,vfs
+apps.vfs_fsync_error,,a dimension per app group,calls/s,Sync error,stacked,,ebpf.plugin,vfs
+apps.vfs_open,,a dimension per app group,calls/s,Calls for <code>vfs_open</code>,stacked,,ebpf.plugin,vfs
+apps.vfs_open_error,,a dimension per app group,calls/s,Open error,stacked,,ebpf.plugin,vfs
+apps.vfs_create,,a dimension per app group,calls/s,Calls for <code>vfs_create</code>,stacked,,ebpf.plugin,vfs
+apps.vfs_create_error,,a dimension per app group,calls/s,Create error,stacked,,ebpf.plugin,vfs
+netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL.,stacked,,ebpf.plugin,process
+netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory.,stacked,,ebpf.plugin,process
+netdata.ebpf_threads,,"total, running",threads,Threads info,line,,ebpf.plugin,process
+netdata.ebpf_load_methods,,"legacy, co-re",methods,Load info,line,,ebpf.plugin,process
+netdata.ebpf_kernel_memory,,memory_locked,bytes,Memory allocated for hash tables.,line,,ebpf.plugin,process
+netdata.ebpf_hash_tables_count,,hash_table,hash tables,Number of hash tables loaded,line,,ebpf.plugin,process
+netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL,stacked,,ebpf.plugin,process
+netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory,stacked,,ebpf.plugin,process
+netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL.,stacked,,ebpf.plugin,process
+netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory,stacked,,ebpf.plugin,process
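
The metrics.csv added above is documentation: each row describes one chart the plugin can produce (metric id, scope, dimensions, unit, description, chart type, labels, plugin, module). For readers who want to see how such a row surfaces at runtime, the sketch below prints the Netdata external-plugin protocol lines for one row (filesystem.vfs_deleted_objects). Only the protocol keywords (CHART, DIMENSION, BEGIN, SET, END) and the ids taken from the CSV are grounded; the family, priority, and sample value are illustrative assumptions, not the plugin's actual choices.

```c
/* Minimal sketch of the Netdata external-plugin protocol for one metrics.csv row.
 * Chart id, dimension, units and title follow the CSV; family, priority and the
 * sample value are made up for this illustration. */
#include <stdio.h>

int main(void) {
    int update_every = 1;

    /* Chart definition: type.id, name, title, units, family, context, chart type,
     * priority, update_every, options, plugin, module. */
    printf("CHART filesystem.vfs_deleted_objects '' 'Remove files' 'calls/s' "
           "'vfs' 'filesystem.vfs_deleted_objects' line 20065 %d '' ebpf.plugin vfs\n",
           update_every);
    printf("DIMENSION delete '' incremental 1 1\n");

    /* One data point per collection cycle. */
    printf("BEGIN filesystem.vfs_deleted_objects\n");
    printf("SET delete = 12\n");   /* illustrative counter value */
    printf("END\n");
    return 0;
}
```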