summaryrefslogtreecommitdiffstats
path: root/collectors/ebpf.plugin/ebpf_process.c
diff options
context:
space:
mode:
Diffstat (limited to 'collectors/ebpf.plugin/ebpf_process.c')
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c | 96
1 file changed, 82 insertions, 14 deletions
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 66af47857..17a9809d3 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -18,17 +18,33 @@ static char *status[] = { "process", "zombie" };
static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_total_stats", .internal_input = NETDATA_KEY_END_VECTOR,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "process_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
char *tracepoint_sched_type = { "sched" } ;
char *tracepoint_sched_process_exit = { "sched_process_exit" };
@@ -39,6 +55,7 @@ static int was_sched_process_exec_enabled = 0;
static int was_sched_process_fork_enabled = 0;
static netdata_idx_t *process_hash_values = NULL;
+ebpf_process_stat_t *process_stat_vector = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
@@ -55,6 +72,7 @@ static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"};
static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
static char *memlock_stat = {"memory_locked"};
static char *hash_table_stat = {"hash_table"};
+static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};
/*****************************************************************
*
@@ -251,8 +269,10 @@ void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
/**
* Read the hash table and store data to allocated vectors.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_hash_global_tables()
+static void ebpf_read_process_hash_global_tables(int maps_per_core)
{
uint64_t idx;
netdata_idx_t res[NETDATA_KEY_END_VECTOR];
@@ -263,7 +283,7 @@ static void read_hash_global_tables()
if (!bpf_map_lookup_elem(fd, &idx, val)) {
uint64_t total = 0;
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++)
total += val[i];
@@ -285,13 +305,18 @@ static void read_hash_global_tables()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data based on the PIDs that are running.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_process_cgroup()
+static void ebpf_update_process_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
+ size_t length = sizeof(ebpf_process_stat_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
@@ -303,9 +328,15 @@ static void ebpf_update_process_cgroup()
memcpy(out, in, sizeof(ebpf_process_stat_t));
} else {
- if (bpf_map_lookup_elem(pid_fd, &pid, out)) {
+ if (bpf_map_lookup_elem(pid_fd, &pid, process_stat_vector)) {
memset(out, 0, sizeof(ebpf_process_stat_t));
}
+
+ ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
+
+ memcpy(out, process_stat_vector, sizeof(ebpf_process_stat_t));
+
+ memset(process_stat_vector, 0, length);
}
}
}
@@ -507,6 +538,35 @@ static inline void ebpf_create_statistic_hash_tables(ebpf_module_t *em)
}
/**
+ * Create chart for percpu stats
+ *
+ * Write to standard output the chart definition used to monitor per-core hash table usage.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_hash_per_core(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_HASH_TABLES_PER_CORE,
+ "How threads are loading hash/array tables.",
+ "threads",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140004,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
+ hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
+ hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
* Update Internal Metric variable
*
* By default eBPF.plugin sends internal metrics for netdata, but user can
@@ -541,6 +601,8 @@ static void ebpf_create_statistic_charts(ebpf_module_t *em)
ebpf_create_statistic_kernel_memory(em);
ebpf_create_statistic_hash_tables(em);
+
+ ebpf_create_statistic_hash_per_core(em);
}
/**
@@ -647,6 +709,7 @@ static void ebpf_process_exit(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
freez(process_hash_values);
+ freez(process_stat_vector);
ebpf_process_disable_tracepoints();
@@ -1010,6 +1073,11 @@ void ebpf_send_statistic_data()
write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED);
write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables);
write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE);
+ write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
+ write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
+ write_end_chart();
}
/**
@@ -1032,6 +1100,7 @@ static void process_collector(ebpf_module_t *em)
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
@@ -1041,14 +1110,14 @@ static void process_collector(ebpf_module_t *em)
if (++counter == update_every) {
counter = 0;
- read_hash_global_tables();
+ ebpf_read_process_hash_global_tables(maps_per_core);
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
if (ebpf_all_pids_count > 0) {
if (cgroups && shm_ebpf_cgroup.header) {
- ebpf_update_process_cgroup();
+ ebpf_update_process_cgroup(maps_per_core);
}
}
@@ -1099,6 +1168,7 @@ static void ebpf_process_allocate_global_vectors(size_t length)
memset(process_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
memset(process_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ process_stat_vector = callocz(ebpf_nprocs, sizeof(ebpf_process_stat_t));
global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
}
@@ -1195,8 +1265,7 @@ void *ebpf_process_thread(void *ptr)
set_local_pointers();
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- pthread_mutex_unlock(&lock);
- goto endprocess;
+ em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
}
int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
@@ -1225,7 +1294,6 @@ void *ebpf_process_thread(void *ptr)
process_collector(em);
-endprocess:
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->enabled == NETDATA_THREAD_EBPF_RUNNING)
ebpf_update_disabled_plugin_stats(em);