-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c | 92
 1 file changed, 64 insertions(+), 28 deletions(-)
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index b2b006dd3..5bbbe1f43 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -14,19 +14,34 @@ static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
static netdata_idx_t *cachestat_values = NULL;
ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+#endif
+ }};
struct config cachestat_config = { .first_section = NULL,
.last_section = NULL,
@@ -233,10 +248,14 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_cachestat_adjust_map_size(struct cachestat_bpf *obj, ebpf_module_t *em)
+static void ebpf_cachestat_adjust_map(struct cachestat_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS],
em, bpf_map__name(obj->maps.cstat_pid));
+
+ ebpf_update_map_type(obj->maps.cstat_global, &cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS]);
+ ebpf_update_map_type(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS]);
+ ebpf_update_map_type(obj->maps.cstat_ctrl, &cachestat_maps[NETDATA_CACHESTAT_CTRL]);
}
/**
@@ -291,7 +310,7 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
ebpf_cachestat_disable_specific_probe(obj);
}
- ebpf_cachestat_adjust_map_size(obj, em);
+ ebpf_cachestat_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_cachestat_disable_release_task(obj);
@@ -445,10 +464,11 @@ static void calculate_stats(netdata_publish_cachestat_t *publish) {
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out)
+static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_cachestat_pid_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_cachestat_pid_t *w = &out[i];
@@ -504,14 +524,19 @@ static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *pu
* Read APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void ebpf_read_cachestat_apps_table(int maps_per_core)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_cachestat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
@@ -520,7 +545,7 @@ static void read_apps_table()
continue;
}
- cachestat_apps_accumulator(cv);
+ cachestat_apps_accumulator(cv, maps_per_core);
cachestat_fill_pid(key, cv);
@@ -535,12 +560,16 @@ static void read_apps_table()
* Update cgroup
*
* Update cgroup data based in
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_cachestat_cgroup()
+static void ebpf_update_cachestat_cgroup(int maps_per_core)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_cachestat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
ebpf_cgroup_target_t *ect;
pthread_mutex_lock(&mutex_cgroup_shm);
@@ -559,7 +588,7 @@ static void ebpf_update_cachestat_cgroup()
continue;
}
- cachestat_apps_accumulator(cv);
+ cachestat_apps_accumulator(cv, maps_per_core);
memcpy(out, cv, sizeof(netdata_cachestat_pid_t));
}
@@ -627,8 +656,10 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
* Read global counter
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_cachestat_read_global_table()
+static void ebpf_cachestat_read_global_table(int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = cachestat_hash_values;
@@ -638,7 +669,7 @@ static void ebpf_cachestat_read_global_table()
for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];
@@ -1053,6 +1084,7 @@ static void cachestat_collector(ebpf_module_t *em)
memset(&publish, 0, sizeof(publish));
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
+ int maps_per_core = em->maps_per_core;
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
@@ -1065,13 +1097,13 @@ static void cachestat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_cachestat_read_global_table();
+ ebpf_cachestat_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- read_apps_table();
+ ebpf_read_cachestat_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_cachestat_cgroup();
+ ebpf_update_cachestat_cgroup(maps_per_core);
pthread_mutex_lock(&lock);
@@ -1216,6 +1248,10 @@ static int ebpf_cachestat_set_internal_value()
*/
static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(cachestat_maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode);
if (em->load & EBPF_LOAD_LEGACY) {