Diffstat:
src/collectors/ebpf.plugin/ebpf_shm.c (renamed from collectors/ebpf.plugin/ebpf_shm.c) | 403
1 file changed, 224 insertions(+), 179 deletions(-)
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/src/collectors/ebpf.plugin/ebpf_shm.c
index f14eb67d0..816e68cfd 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/src/collectors/ebpf.plugin/ebpf_shm.c
@@ -54,6 +54,17 @@ netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TR
int shm_disable_priority;
#endif
+struct netdata_static_thread ebpf_read_shm = {
+ .name = "EBPF_READ_SHM",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
+
#ifdef LIBBPF_MAJOR_VERSION
/*****************************************************************
*
@@ -89,7 +100,6 @@ static void ebpf_disable_probe(struct shm_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_shmat_probe, false);
bpf_program__set_autoload(obj->progs.netdata_shmdt_probe, false);
bpf_program__set_autoload(obj->progs.netdata_shmctl_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
}
/*
@@ -105,7 +115,6 @@ static void ebpf_disable_trampoline(struct shm_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_shmat_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_shmdt_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_shmctl_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
}
/**
@@ -138,9 +147,6 @@ static void ebpf_set_trampoline_target(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
bpf_program__set_attach_target(obj->progs.netdata_shmctl_fentry, 0,
syscall);
-
- bpf_program__set_attach_target(obj->progs.netdata_shm_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -160,7 +166,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
obj->links.netdata_shmget_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmget_probe,
false, syscall);
- int ret = (int)libbpf_get_error(obj->links.netdata_shmget_probe);
+ long ret = libbpf_get_error(obj->links.netdata_shmget_probe);
if (ret)
return -1;
@@ -168,7 +174,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMAT_CALL].name, running_on_kernel);
obj->links.netdata_shmat_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmat_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmat_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmat_probe);
if (ret)
return -1;
@@ -176,7 +182,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMDT_CALL].name, running_on_kernel);
obj->links.netdata_shmdt_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmdt_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmdt_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmdt_probe);
if (ret)
return -1;
@@ -184,17 +190,10 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
obj->links.netdata_shmctl_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmctl_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmctl_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_shm_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_shm_release_task_probe,
- false, EBPF_COMMON_FNCT_CLEAN_UP);
- ret = (int)libbpf_get_error(obj->links.netdata_shm_release_task_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmctl_probe);
if (ret)
return -1;
-
return 0;
}
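Side note on the error-handling change above: libbpf_get_error() returns a long (0 on success, a negative errno value on failure), so the patch stops narrowing it to int. A minimal sketch of the attach/error-check pattern, assuming illustrative names rather than the real Netdata program handles and syscall strings:

    /* Sketch only: 'prog' and 'fn' stand in for the bpf_program handles and
     * kernel function names resolved at runtime by the collector. */
    #include <stdio.h>
    #include <bpf/libbpf.h>

    static int attach_one_kprobe(struct bpf_program *prog, const char *fn)
    {
        struct bpf_link *link = bpf_program__attach_kprobe(prog, false, fn);
        long err = libbpf_get_error(link);  /* 0 on success, negative errno on failure */
        if (err) {
            fprintf(stderr, "cannot attach kprobe to %s: %ld\n", fn, err);
            return -1;
        }
        return 0;
    }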
@@ -211,19 +210,6 @@ static void ebpf_shm_set_hash_tables(struct shm_bpf *obj)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_shm_disable_release_task(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
-}
-
-/**
* Adjust Map Size
*
* Resize maps according input from users.
@@ -271,8 +257,6 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
}
ebpf_shm_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_shm_disable_release_task(obj);
int ret = shm_bpf__load(obj);
if (!ret) {
@@ -301,11 +285,11 @@ static void ebpf_obsolete_specific_shm_charts(char *type, int update_every);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_shm_services(ebpf_module_t *em)
+static void ebpf_obsolete_shm_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMGET_CHART,
- "",
"Calls to syscall shmget(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -315,8 +299,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMAT_CHART,
- "",
"Calls to syscall shmat(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -326,8 +310,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMDT_CHART,
- "",
"Calls to syscall shmdt(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -337,8 +321,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMCTL_CHART,
- "",
"Calls to syscall shmctl(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -358,12 +342,13 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
static inline void ebpf_obsolete_shm_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_shm_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_shm_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_shm_charts(ect->name, em->update_every);
}
@@ -381,6 +366,7 @@ void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
continue;
@@ -431,6 +417,7 @@ void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em)
w->charts_created &= ~(1<<EBPF_MODULE_SHM_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -465,6 +452,9 @@ static void ebpf_shm_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_shm.thread)
+ netdata_thread_cancel(*ebpf_read_shm.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -478,11 +468,6 @@ static void ebpf_shm_exit(void *ptr)
ebpf_obsolete_shm_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_statistic_obsolete_aral_chart(em, shm_disable_priority);
-#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -534,38 +519,16 @@ static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
}
/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
-{
- netdata_publish_shm_t *curr = shm_pid[current_pid];
- if (!curr) {
- curr = ebpf_shm_stat_get( );
- shm_pid[current_pid] = curr;
- }
-
- memcpy(curr, publish, sizeof(netdata_publish_shm_t));
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_shm_cgroup(int maps_per_core)
+static void ebpf_update_shm_cgroup()
{
netdata_publish_shm_t *cv = shm_vector;
- int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
ebpf_cgroup_target_t *ect;
@@ -577,20 +540,11 @@ static void ebpf_update_shm_cgroup(int maps_per_core)
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_shm_t *out = &pids->shm;
- if (likely(shm_pid) && shm_pid[pid]) {
- netdata_publish_shm_t *in = shm_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_shm_t *in = &local_pid->shm;
memcpy(out, in, sizeof(netdata_publish_shm_t));
- } else {
- if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- shm_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_publish_shm_t));
-
- // now that we've consumed the value, zero it out in the map.
- memset(cv, 0, length);
- bpf_map_update_elem(fd, &pid, cv, BPF_EXIST);
- }
}
}
}
@@ -604,33 +558,42 @@ static void ebpf_update_shm_cgroup(int maps_per_core)
*
* @param maps_per_core do I need to read all cores?
*/
-static void read_shm_apps_table(int maps_per_core)
+static void ebpf_read_shm_apps_table(int maps_per_core, int max_period)
{
netdata_publish_shm_t *cv = shm_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
+ goto end_shm_loop;
}
shm_apps_accumulator(cv, maps_per_core);
- shm_fill_pid(key, cv);
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, 0);
+ if (!local_pid)
+ goto end_shm_loop;
+
+ netdata_publish_shm_t *publish = &local_pid->shm;
+ if (!publish->ct || publish->ct != cv->ct) {
+ memcpy(publish, &cv[0], sizeof(netdata_publish_shm_t));
+ local_pid->not_updated = 0;
+ } else if (++local_pid->not_updated >= max_period){
+ bpf_map_delete_elem(fd, &key);
+ local_pid->not_updated = 0;
+ }
+
+end_shm_loop:
// now that we've consumed the value, zero it out in the map.
memset(cv, 0, length);
bpf_map_update_elem(fd, &key, cv, BPF_EXIST);
- pids = pids->next;
+ key = next_key;
}
}
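The rewritten ebpf_read_shm_apps_table() no longer walks Netdata's internal PID list; it iterates the BPF hash table itself with bpf_map_get_next_key(). A minimal, self-contained sketch of that iteration pattern, where the value type and the reset policy are illustrative assumptions and not Netdata code:

    #include <string.h>
    #include <stdint.h>
    #include <bpf/bpf.h>

    typedef struct { uint64_t calls; } value_t;   /* illustrative per-PID value */

    static void walk_pid_map(int map_fd)
    {
        uint32_t key = 0, next_key = 0;
        value_t value;

        while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
            if (bpf_map_lookup_elem(map_fd, &key, &value) == 0) {
                /* consume 'value' here, then zero it in the map, mirroring
                 * what the collector does after publishing the data */
                memset(&value, 0, sizeof(value));
                bpf_map_update_elem(map_fd, &key, &value, BPF_EXIST);
            }
            key = next_key;   /* advance the iteration cursor */
        }
    }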
@@ -689,10 +652,12 @@ static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
*/
static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
{
+ memset(shm, 0, sizeof(netdata_publish_shm_t));
while (root) {
int32_t pid = root->pid;
- netdata_publish_shm_t *w = shm_pid[pid];
- if (w) {
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
+ if (pid_stat) {
+ netdata_publish_shm_t *w = &pid_stat->shm;
shm->get += w->get;
shm->at += w->at;
shm->dt += w->dt;
@@ -716,12 +681,11 @@ static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_tar
void ebpf_shm_send_apps_data(struct ebpf_target *root)
{
struct ebpf_target *w;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
continue;
- ebpf_shm_sum_pids(&w->shm, w->root_pid);
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmget_call");
write_chart_dimension("calls", (long long) w->shm.get);
ebpf_write_end_chart();
@@ -738,6 +702,7 @@ void ebpf_shm_send_apps_data(struct ebpf_target *root)
write_chart_dimension("calls", (long long) w->shm.ctl);
ebpf_write_end_chart();
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -875,41 +840,82 @@ static void ebpf_obsolete_specific_shm_charts(char *type, int update_every)
**/
static void ebpf_create_systemd_shm_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART,
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmget = {
+ .title = "Calls to syscall shmget(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20191,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_GET_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMGET_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART,
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20192,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmat = {
+ .title = "Calls to syscall shmat(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20192,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_AT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMAT_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART,
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20193,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmdt = {
+ .title = "Calls to syscall shmdt(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20193,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_DT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMDT_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART,
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20193,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_CTL_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmctl = {
+ .title = "Calls to syscall shmctl(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20194,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_CTL_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMCTL_CHART,
+ .dimension = "calls"
+ };
+
+ if (!data_shmget.update_every)
+ data_shmat.update_every = data_shmctl.update_every =
+ data_shmdt.update_every = data_shmget.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_SHM_CHART))
+ continue;
+
+ data_shmat.id = data_shmctl.id = data_shmdt.id = data_shmget.id = w->name;
+ ebpf_create_charts_on_systemd(&data_shmat);
+
+ ebpf_create_charts_on_systemd(&data_shmctl);
+
+ ebpf_create_charts_on_systemd(&data_shmdt);
+
+ ebpf_create_charts_on_systemd(&data_shmget);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_SHM_CHART;
+ }
}
/**
@@ -920,37 +926,27 @@ static void ebpf_create_systemd_shm_charts(int update_every)
static void ebpf_send_systemd_shm_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART, "");
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.get);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_SHM_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.at);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMGET_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.get);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.dt);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMAT_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.at);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl);
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMDT_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.dt);
+ ebpf_write_end_chart();
+
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMCTL_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.ctl);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/*
@@ -987,17 +983,13 @@ static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *value
*/
void ebpf_shm_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_shm_sum_cgroup_pids(&ect->publish_shm, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_shm_charts(update_every);
}
@@ -1028,6 +1020,72 @@ void ebpf_shm_send_cgroup_data(int update_every)
}
/**
+ * Resume apps data
+ */
+void ebpf_shm_resume_apps_data() {
+ struct ebpf_target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1 << EBPF_MODULE_SHM_IDX))))
+ continue;
+
+ ebpf_shm_sum_pids(&w->shm, w->root_pid);
+ }
+}
+
+/**
+ * SHM reader thread
+ *
+ * Thread used to read shared memory tables and aggregate per-application data.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_read_shm_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_shm_apps_table(maps_per_core, max_period);
+ ebpf_shm_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
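The reader thread above disables cancellation while it holds collect_data_mutex, so the netdata_thread_cancel() issued at shutdown cannot stop it mid-critical-section. A hedged pthread-level sketch of that pattern, assuming Netdata's netdata_thread_* helpers map onto these primitives:

    #include <pthread.h>

    static pthread_mutex_t collect_lock = PTHREAD_MUTEX_INITIALIZER;

    static void collect_once(void)
    {
        int oldstate;
        /* keep cancellation requests pending while the lock is held */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);

        pthread_mutex_lock(&collect_lock);
        /* ... read BPF tables and aggregate per-target data here ... */
        pthread_mutex_unlock(&collect_lock);

        /* re-enable cancellation once no shared state is held */
        pthread_setcancelstate(oldstate, NULL);
    }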
+/**
* Main loop for this collector.
*/
static void shm_collector(ebpf_module_t *em)
@@ -1050,34 +1108,23 @@ static void shm_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_shm_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps) {
- read_shm_apps_table(maps_per_core);
- }
+ pthread_mutex_lock(&lock);
if (cgroups) {
- ebpf_update_shm_cgroup(maps_per_core);
+ ebpf_update_shm_cgroup();
}
- pthread_mutex_lock(&lock);
-
shm_send_global();
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
ebpf_shm_send_apps_data(apps_groups_root_target);
}
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_send_data_aral_chart(ebpf_aral_shm_pid, em);
-#endif
-
if (cgroups) {
ebpf_shm_send_cgroup_data(update_every);
}
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -1186,12 +1233,8 @@ void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_shm_allocate_global_vectors(int apps)
{
- if (apps) {
- ebpf_shm_aral_init();
- shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
- shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
- }
-
+ UNUSED(apps);
+ shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(shm_hash_values, 0, sizeof(shm_hash_values));
@@ -1310,13 +1353,15 @@ void *ebpf_shm_thread(void *ptr)
ebpf_create_shm_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- shm_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_SHM_ARAL_NAME, em);
-#endif
-
pthread_mutex_unlock(&lock);
+ ebpf_read_shm.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_shm.thread,
+ ebpf_read_shm.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_shm_thread,
+ em);
+
shm_collector(em);
endshm:
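For context, the lifecycle introduced by this patch pairs the thread creation above with the cancel added earlier in ebpf_shm_exit(). A hedged sketch of that pairing, assuming netdata_thread_create()/netdata_thread_cancel() wrap the pthread calls below:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_t *shm_reader_thread;

    static void start_shm_reader(void *(*routine)(void *), void *module)
    {
        shm_reader_thread = malloc(sizeof(*shm_reader_thread));
        if (shm_reader_thread)
            pthread_create(shm_reader_thread, NULL, routine, module);
    }

    static void stop_shm_reader(void)
    {
        if (shm_reader_thread)
            pthread_cancel(*shm_reader_thread);   /* mirrors the cancel in ebpf_shm_exit() */
    }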