summary refs log tree commit diff stats
path: root/collectors/cgroups.plugin
diff options
context:
space:
mode:
author: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-03-09 13:19:48 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org> 2024-03-09 13:20:02 +0000
commit: 58daab21cd043e1dc37024a7f99b396788372918 (patch)
tree: 96771e43bb69f7c1c2b0b4f7374cb74d7866d0cb /collectors/cgroups.plugin
parent: Releasing debian version 1.43.2-1. (diff)
download: netdata-58daab21cd043e1dc37024a7f99b396788372918.tar.xz
download: netdata-58daab21cd043e1dc37024a7f99b396788372918.zip
Merging upstream version 1.44.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--collectors/cgroups.plugin/Makefile.am10
-rw-r--r--collectors/cgroups.plugin/cgroup-charts.c1526
-rw-r--r--collectors/cgroups.plugin/cgroup-discovery.c1245
-rw-r--r--collectors/cgroups.plugin/cgroup-internals.h514
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-name.sh659
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-name.sh.in706
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-network-helper.sh320
-rwxr-xr-xcollectors/cgroups.plugin/cgroup-network-helper.sh.in376
-rw-r--r--collectors/cgroups.plugin/cgroup-network.c39
-rw-r--r--collectors/cgroups.plugin/cgroup-top.c520
-rw-r--r--collectors/cgroups.plugin/integrations/containers.md15
-rw-r--r--collectors/cgroups.plugin/integrations/kubernetes_containers.md51
-rw-r--r--collectors/cgroups.plugin/integrations/libvirt_containers.md15
-rw-r--r--collectors/cgroups.plugin/integrations/lxc_containers.md15
-rw-r--r--collectors/cgroups.plugin/integrations/ovirt_containers.md15
-rw-r--r--collectors/cgroups.plugin/integrations/proxmox_containers.md15
-rw-r--r--collectors/cgroups.plugin/integrations/systemd_services.md2
-rw-r--r--collectors/cgroups.plugin/integrations/virtual_machines.md15
-rw-r--r--collectors/cgroups.plugin/metadata.yaml94
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.c3572
-rw-r--r--collectors/cgroups.plugin/sys_fs_cgroup.h4
-rw-r--r--collectors/cgroups.plugin/tests/test_doubles.c4
22 files changed, 5377 insertions, 4355 deletions
diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am
index 354b9fbdc..0f6062420 100644
--- a/collectors/cgroups.plugin/Makefile.am
+++ b/collectors/cgroups.plugin/Makefile.am
@@ -3,11 +3,21 @@
AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+CLEANFILES = \
+ cgroup-name.sh \
+ cgroup-network-helper.sh \
+ $(NULL)
+
+include $(top_srcdir)/build/subst.inc
+SUFFIXES = .in
+
dist_plugins_SCRIPTS = \
cgroup-name.sh \
cgroup-network-helper.sh \
$(NULL)
dist_noinst_DATA = \
+ cgroup-name.sh.in \
+ cgroup-network-helper.sh.in \
README.md \
$(NULL)
diff --git a/collectors/cgroups.plugin/cgroup-charts.c b/collectors/cgroups.plugin/cgroup-charts.c
new file mode 100644
index 000000000..a89e8ac45
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-charts.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "cgroup-internals.h"
+
+// Render the per-cgroup total CPU utilization chart (user + system time).
+// The RRDSET is created lazily on first call; dimension divisors differ by
+// hierarchy: cgroup v1 counters are in jiffies (system_hz), cgroup v2
+// (unified) counters are in microseconds (1e6).
+void update_cpu_utilization_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_cpu;
+
+    if (unlikely(!cg->st_cpu)) {
+        char *title;
+        char *context;
+        int prio;
+
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services CPU utilization (100%% = 1 core)";
+            context = "systemd.service.cpu.utilization";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD;
+        } else {
+            title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu" : "cgroup.cpu";
+            prio = cgroup_containers_chart_priority;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_cpu = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "cpu",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_STACKED);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+            // cgroup v1: cpuacct.stat reports jiffies
+            cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+            cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+        } else {
+            // cgroup v2: cpu.stat reports microseconds
+            cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+            cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+        }
+    }
+
+    rrddim_set_by_pointer(chart, cg->st_cpu_rd_user, (collected_number)cg->cpuacct_stat.user);
+    rrddim_set_by_pointer(chart, cg->st_cpu_rd_system, (collected_number)cg->cpuacct_stat.system);
+    rrdset_done(chart);
+}
+
+// Render CPU usage as a percentage of the cgroup's configured CPU limit.
+// Uses the delta of (user+system)*100 between consecutive calls divided by
+// (cpu_limit * cgroup_update_every). Not emitted for systemd service cgroups.
+// NOTE(review): assumes cpu_limit != 0 — caller must guarantee, otherwise
+// this divides by zero. TODO confirm against call sites.
+void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_cpu_limit;
+
+    if (unlikely(!cg->st_cpu_limit)) {
+        char *title = "CPU Usage within the limits";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_limit" : "cgroup.cpu_limit";
+        int prio = cgroup_containers_chart_priority - 1;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_cpu_limit = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "cpu_limit",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED))
+            rrddim_add(chart, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE);
+        else
+            rrddim_add(chart, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
+        // seed the baseline so the first delta is not measured against zero
+        cg->prev_cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
+    }
+
+    NETDATA_DOUBLE cpu_usage = 0;
+    cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
+    NETDATA_DOUBLE cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (cpu_limit * cgroup_update_every);
+
+    rrdset_isnot_obsolete___safe_from_collector_thread(chart);
+
+    // negative deltas (e.g. after counter reset) are clamped to 0
+    rrddim_set(chart, "used", (cpu_used > 0) ? (collected_number)cpu_used : 0);
+
+    cg->prev_cpu_usage = cpu_usage;
+
+    // NOTE(review): the limit variable is attached to cg->st_cpu (the main
+    // cpu chart), not to this chart — presumably intentional; verify.
+    rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, cpu_limit);
+    rrdset_done(chart);
+}
+
+// Render the percentage of runnable periods in which the cgroup was CPU
+// throttled (pre-computed in cpuacct_cpu_throttling.nr_throttled_perc).
+// Skipped for systemd service cgroups.
+void update_cpu_throttled_chart(struct cgroup *cg) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_cpu_nr_throttled;
+
+    if (unlikely(!cg->st_cpu_nr_throttled)) {
+        char *title = "CPU Throttled Runnable Periods";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled" : "cgroup.throttled";
+        int prio = cgroup_containers_chart_priority + 10;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_cpu_nr_throttled = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "throttled",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "throttled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    rrddim_set(chart, "throttled", (collected_number)cg->cpuacct_cpu_throttling.nr_throttled_perc);
+    rrdset_done(chart);
+}
+
+// Render total time the cgroup spent CPU-throttled. The kernel counter is
+// scaled by the 1e6 divisor to match the chart's "ms" unit — presumably the
+// raw value is in nanoseconds; TODO confirm against the collector.
+// Skipped for systemd service cgroups.
+void update_cpu_throttled_duration_chart(struct cgroup *cg) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_cpu_throttled_time;
+
+    if (unlikely(!cg->st_cpu_throttled_time)) {
+        char *title = "CPU Throttled Time Duration";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled_duration" : "cgroup.throttled_duration";
+        int prio = cgroup_containers_chart_priority + 15;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_cpu_throttled_time = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "throttled_duration",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "ms",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "duration", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "duration", (collected_number)cg->cpuacct_cpu_throttling.throttled_time);
+    rrdset_done(chart);
+}
+
+// Render the cgroup's relative CPU weight (cpu.shares / cpu.weight) as a
+// plain gauge. Skipped for systemd service cgroups.
+void update_cpu_shares_chart(struct cgroup *cg) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_cpu_shares;
+
+    if (unlikely(!cg->st_cpu_shares)) {
+        char *title = "CPU Time Relative Share";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_shares" : "cgroup.cpu_shares";
+        int prio = cgroup_containers_chart_priority + 20;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_cpu_shares = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "cpu_shares",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "shares",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "shares", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    rrddim_set(chart, "shares", (collected_number)cg->cpuacct_cpu_shares.shares);
+    rrdset_done(chart);
+}
+
+// Render per-CPU-core usage for the cgroup, one "cpuN" dimension per core
+// (nanosecond counters, divisor 1e9). Skipped for systemd service cgroups.
+// NOTE(review): dimensions are created only once, at chart creation — if
+// cpuacct_usage.cpus later grows, rrddim_set() for the new cores will target
+// dimensions that do not exist; verify the collector keeps the count stable.
+void update_cpu_per_core_usage_chart(struct cgroup *cg) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    char id[RRD_ID_LENGTH_MAX + 1];
+    unsigned int i;
+
+    if (unlikely(!cg->st_cpu_per_core)) {
+        char *title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU) Per Core" : "CPU Usage (100%% = 1 core) Per Core";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_per_core" : "cgroup.cpu_per_core";
+        int prio = cgroup_containers_chart_priority + 100;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        cg->st_cpu_per_core = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "cpu_per_core",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_STACKED);
+
+        rrdset_update_rrdlabels(cg->st_cpu_per_core, cg->chart_labels);
+
+        for (i = 0; i < cg->cpuacct_usage.cpus; i++) {
+            snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+            rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
+        }
+    }
+
+    for (i = 0; i < cg->cpuacct_usage.cpus; i++) {
+        snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
+        rrddim_set(cg->st_cpu_per_core, id, (collected_number)cg->cpuacct_usage.cpu_percpu[i]);
+    }
+    rrdset_done(cg->st_cpu_per_core);
+}
+
+// Render the detailed memory-breakdown chart. Dimension sets differ by
+// hierarchy: v1 uses cache/rss/(swap)/rss_huge/mapped_file from memory.stat,
+// v2 (unified) uses anon/kernel_stack/slab/sock/anon_thp/file. All values
+// are bytes rendered as MiB.
+void update_mem_usage_detailed_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_mem;
+
+    if (unlikely(!cg->st_mem)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Memory";
+            context = "systemd.service.memory.ram.usage";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 15;
+        } else {
+            title = "Memory Usage";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem" : "cgroup.mem";
+            prio = cgroup_containers_chart_priority + 220;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+
+        chart = cg->st_mem = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_STACKED);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+            rrddim_add(chart, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+            // swap is only present when the kernel exposes swap accounting
+            if (cg->memory.detailed_has_swap)
+                rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+
+            rrddim_add(chart, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        } else {
+            rrddim_add(chart, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rrddim_add(chart, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        }
+    }
+
+    if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+        rrddim_set(chart, "cache", (collected_number)cg->memory.total_cache);
+        // v1 total_rss includes transparent huge pages; subtract them so
+        // "rss" and "rss_huge" stack without double counting, clamped at 0
+        collected_number rss = (collected_number)(cg->memory.total_rss - cg->memory.total_rss_huge);
+        if (rss < 0)
+            rss = 0;
+        rrddim_set(chart, "rss", rss);
+        if (cg->memory.detailed_has_swap)
+            rrddim_set(chart, "swap", (collected_number)cg->memory.total_swap);
+        rrddim_set(chart, "rss_huge", (collected_number)cg->memory.total_rss_huge);
+        rrddim_set(chart, "mapped_file", (collected_number)cg->memory.total_mapped_file);
+    } else {
+        rrddim_set(chart, "anon", (collected_number)cg->memory.anon);
+        rrddim_set(chart, "kernel_stack", (collected_number)cg->memory.kernel_stack);
+        rrddim_set(chart, "slab", (collected_number)cg->memory.slab);
+        rrddim_set(chart, "sock", (collected_number)cg->memory.sock);
+        rrddim_set(chart, "anon_thp", (collected_number)cg->memory.anon_thp);
+        // NOTE(review): the unified "file" dimension is fed from
+        // total_mapped_file — presumably the collector stores v2 "file"
+        // there; verify against the cgroup v2 parsing code.
+        rrddim_set(chart, "file", (collected_number)cg->memory.total_mapped_file);
+    }
+    rrdset_done(chart);
+}
+
+// Render dirty + writeback memory for the cgroup (bytes shown as MiB).
+// The "dirty" dimension exists only when the kernel exposes it
+// (memory.detailed_has_dirty).
+void update_mem_writeback_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_writeback;
+
+    if (unlikely(!cg->st_writeback)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Writeback Memory";
+            context = "systemd.service.memory.writeback";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20;
+        } else {
+            title = "Writeback Memory";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.writeback" : "cgroup.writeback";
+            prio = cgroup_containers_chart_priority + 300;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_writeback = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "writeback",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_AREA);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        if (cg->memory.detailed_has_dirty)
+            rrddim_add(chart, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        rrddim_add(chart, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    if (cg->memory.detailed_has_dirty)
+        rrddim_set(chart, "dirty", (collected_number)cg->memory.total_dirty);
+    rrddim_set(chart, "writeback", (collected_number)cg->memory.total_writeback);
+    rrdset_done(chart);
+}
+
+// Render memory paging activity (pgpgin/pgpgout). The page counters are
+// multiplied by system_page_size and divided by 1 MiB to plot MiB/s;
+// pgpgout is negated so in/out mirror around zero.
+void update_mem_activity_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_mem_activity;
+
+    if (unlikely(!cg->st_mem_activity)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Memory Paging IO";
+            context = "systemd.service.memory.paging.io";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30;
+        } else {
+            title = "Memory Activity";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_activity" : "cgroup.mem_activity";
+            prio = cgroup_containers_chart_priority + 400;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_mem_activity = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem_activity",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        // FIXME: systemd just in, out
+        rrddim_add(chart, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+        rrddim_add(chart, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "pgpgin", (collected_number)cg->memory.total_pgpgin);
+    rrddim_set(chart, "pgpgout", (collected_number)cg->memory.total_pgpgout);
+    rrdset_done(chart);
+}
+
+// Render page-fault activity. "pgfault" counts minor faults; "pgmajfault"
+// is displayed under the name "swap" and negated to mirror below zero.
+// Fault counts are scaled by page size to MiB/s.
+void update_mem_pgfaults_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_pgfaults;
+
+    if (unlikely(!cg->st_pgfaults)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Memory Page Faults";
+            context = "systemd.service.memory.paging.faults";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 25;
+        } else {
+            title = "Memory Page Faults";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.pgfaults" : "cgroup.pgfaults";
+            prio = cgroup_containers_chart_priority + 500;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_pgfaults = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "pgfaults",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+        rrddim_add(chart, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "pgfault", (collected_number)cg->memory.total_pgfault);
+    rrddim_set(chart, "pgmajfault", (collected_number)cg->memory.total_pgmajfault);
+    rrdset_done(chart);
+}
+
+// Render used vs. available RAM against the cgroup's memory limit (MiB).
+// Skipped for systemd service cgroups.
+// NOTE(review): "available" is memory_limit - usage_in_bytes with unsigned
+// arithmetic — if usage momentarily exceeds the limit this underflows to a
+// huge value; caller presumably guarantees limit >= usage, verify.
+void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_mem_usage_limit;
+
+    if (unlikely(!cg->st_mem_usage_limit)) {
+        char *title = "Used RAM within the limits";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage_limit" : "cgroup.mem_usage_limit";
+        int prio = cgroup_containers_chart_priority + 200;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_mem_usage_limit = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem_usage_limit",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_STACKED);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        rrddim_add(chart, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        rrddim_add(chart, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    // chart may have been marked obsolete while the limit was unset
+    rrdset_isnot_obsolete___safe_from_collector_thread(chart);
+
+    rrddim_set(chart, "available", (collected_number)(memory_limit - cg->memory.usage_in_bytes));
+    rrddim_set(chart, "used", (collected_number)cg->memory.usage_in_bytes);
+    rrdset_done(chart);
+}
+
+// Render memory usage as a percentage of the cgroup's memory limit.
+// Skipped for systemd service cgroups.
+// NOTE(review): divides by memory_limit with no zero check — caller must
+// guarantee memory_limit != 0; confirm against call sites.
+void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    RRDSET *chart = cg->st_mem_utilization;
+
+    if (unlikely(!cg->st_mem_utilization)) {
+        char *title = "Memory Utilization";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_utilization" : "cgroup.mem_utilization";
+        int prio = cgroup_containers_chart_priority + 199;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_mem_utilization = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem_utilization",
+            NULL,
+            "mem",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_AREA);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        rrddim_add(chart, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    // chart may have been marked obsolete while the limit was unset
+    rrdset_isnot_obsolete___safe_from_collector_thread(chart);
+    collected_number util = (collected_number)(cg->memory.usage_in_bytes * 100 / memory_limit);
+    rrddim_set(chart, "utilization", util);
+    rrdset_done(chart);
+}
+
+// Render the memory-limit failure counter (memory.failcnt) as an
+// incremental rate of failures per second.
+void update_mem_failcnt_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_mem_failcnt;
+
+    if (unlikely(!cg->st_mem_failcnt)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Memory Limit Failures";
+            context = "systemd.service.memory.failcnt";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10;
+        } else {
+            title = "Memory Limit Failures";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_failcnt" : "cgroup.mem_failcnt";
+            prio = cgroup_containers_chart_priority + 250;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_mem_failcnt = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem_failcnt",
+            NULL,
+            "mem",
+            context,
+            title,
+            "count",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "failures", (collected_number)cg->memory.failcnt);
+    rrdset_done(chart);
+}
+
+// Render the ram + swap usage chart (bytes shown as MiB). On cgroup v1 the
+// swap dimension is derived: msw_usage - (usage + inactive_file), clamped
+// at 0, because v1 memsw usage includes RAM; on v2 msw_usage_in_bytes is
+// used directly (the collector stores v2 swap there — TODO confirm).
+void update_mem_usage_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_mem_usage;
+
+    if (unlikely(!cg->st_mem_usage)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Used Memory";
+            context = "systemd.service.memory.usage";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 5;
+        } else {
+            title = "Used Memory";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage" : "cgroup.mem_usage";
+            prio = cgroup_containers_chart_priority + 210;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_mem_usage = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "mem_usage",
+            NULL,
+            "mem",
+            context,
+            title,
+            "MiB",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_STACKED);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        cg->st_mem_rd_ram = rrddim_add(chart, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        cg->st_mem_rd_swap = rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    rrddim_set_by_pointer(chart, cg->st_mem_rd_ram, (collected_number)cg->memory.usage_in_bytes);
+
+    if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+        rrddim_set_by_pointer(
+            chart,
+            cg->st_mem_rd_swap,
+            cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
+                (collected_number)(cg->memory.msw_usage_in_bytes -
+                                   (cg->memory.usage_in_bytes + cg->memory.total_inactive_file)) :
+                0);
+    } else {
+        rrddim_set_by_pointer(chart, cg->st_mem_rd_swap, (collected_number)cg->memory.msw_usage_in_bytes);
+    }
+
+    rrdset_done(chart);
+}
+
+// Render aggregate block I/O bandwidth (blkio io_service_bytes) in KiB/s.
+// Reads plot above zero; writes use a negative multiplier to mirror below.
+void update_io_serviced_bytes_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_io;
+
+    if (unlikely(!cg->st_io)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Disk Read/Write Bandwidth";
+            context = "systemd.service.disk.io";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 35;
+        } else {
+            title = "I/O Bandwidth (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.io" : "cgroup.io";
+            prio = cgroup_containers_chart_priority + 1200;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_io = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "io",
+            NULL,
+            "disk",
+            context,
+            title,
+            "KiB/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_AREA);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        cg->st_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+        cg->st_io_rd_written = rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set_by_pointer(chart, cg->st_io_rd_read, (collected_number)cg->io_service_bytes.Read);
+    rrddim_set_by_pointer(chart, cg->st_io_rd_written, (collected_number)cg->io_service_bytes.Write);
+    rrdset_done(chart);
+}
+
+// Render aggregate serviced I/O operations per second (blkio io_serviced);
+// writes are negated to mirror below zero.
+void update_io_serviced_ops_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_serviced_ops;
+
+    if (unlikely(!cg->st_serviced_ops)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Disk Read/Write Operations";
+            context = "systemd.service.disk.iops";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40;
+        } else {
+            title = "Serviced I/O Operations (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.serviced_ops" : "cgroup.serviced_ops";
+            prio = cgroup_containers_chart_priority + 1200;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_serviced_ops = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "serviced_ops",
+            NULL,
+            "disk",
+            context,
+            title,
+            "operations/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+        rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "read", (collected_number)cg->io_serviced.Read);
+    rrddim_set(chart, "write", (collected_number)cg->io_serviced.Write);
+    rrdset_done(chart);
+}
+
+// Render throttled block I/O bandwidth (blkio throttle.io_service_bytes)
+// in KiB/s; writes are negated to mirror below zero.
+void update_throttle_io_serviced_bytes_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_throttle_io;
+
+    if (unlikely(!cg->st_throttle_io)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Throttle Disk Read/Write Bandwidth";
+            context = "systemd.service.disk.throttle.io";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 45;
+        } else {
+            title = "Throttle I/O Bandwidth (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_io" : "cgroup.throttle_io";
+            prio = cgroup_containers_chart_priority + 1200;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_throttle_io = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "throttle_io",
+            NULL,
+            "disk",
+            context,
+            title,
+            "KiB/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_AREA);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+
+        cg->st_throttle_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+        cg->st_throttle_io_rd_written = rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_read, (collected_number)cg->throttle_io_service_bytes.Read);
+    rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_written, (collected_number)cg->throttle_io_service_bytes.Write);
+    rrdset_done(chart);
+}
+
+// Render throttled serviced I/O operations per second
+// (blkio throttle.io_serviced); writes are negated to mirror below zero.
+void update_throttle_io_serviced_ops_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_throttle_serviced_ops;
+
+    if (unlikely(!cg->st_throttle_serviced_ops)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Throttle Disk Read/Write Operations";
+            context = "systemd.service.disk.throttle.iops";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50;
+        } else {
+            title = "Throttle Serviced I/O Operations (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_serviced_ops" : "cgroup.throttle_serviced_ops";
+            prio = cgroup_containers_chart_priority + 1200;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_throttle_serviced_ops = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "throttle_serviced_ops",
+            NULL,
+            "disk",
+            context,
+            title,
+            "operations/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+        rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "read", (collected_number)cg->throttle_io_serviced.Read);
+    rrddim_set(chart, "write", (collected_number)cg->throttle_io_serviced.Write);
+    rrdset_done(chart);
+}
+
+// Render currently queued I/O operations (blkio io_queued). These are
+// gauges (RRD_ALGORITHM_ABSOLUTE), not rates; writes are negated to mirror.
+void update_io_queued_ops_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_queued_ops;
+
+    if (unlikely(!cg->st_queued_ops)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Queued Disk Read/Write Operations";
+            context = "systemd.service.disk.queued_iops";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 55;
+        } else {
+            title = "Queued I/O Operations (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.queued_ops" : "cgroup.queued_ops";
+            prio = cgroup_containers_chart_priority + 2000;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_queued_ops = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "queued_ops",
+            NULL,
+            "disk",
+            context,
+            title,
+            "operations",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    rrddim_set(chart, "read", (collected_number)cg->io_queued.Read);
+    rrddim_set(chart, "write", (collected_number)cg->io_queued.Write);
+    rrdset_done(chart);
+}
+
+// Render merged I/O operations per second (blkio io_merged); writes are
+// negated to mirror below zero.
+// NOTE(review): the dimensions use divisor 1024 although the unit is
+// "operations/s" (other ops charts in this file use 1/1) — looks copied
+// from the byte-based charts; verify against upstream intent.
+void update_io_merged_ops_chart(struct cgroup *cg) {
+    RRDSET *chart = cg->st_merged_ops;
+
+    if (unlikely(!cg->st_merged_ops)) {
+        char *title;
+        char *context;
+        int prio;
+        if (is_cgroup_systemd_service(cg)) {
+            title = "Systemd Services Merged Disk Read/Write Operations";
+            context = "systemd.service.disk.merged_iops";
+            prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60;
+        } else {
+            title = "Merged I/O Operations (all disks)";
+            context = k8s_is_kubepod(cg) ? "k8s.cgroup.merged_ops" : "cgroup.merged_ops";
+            prio = cgroup_containers_chart_priority + 2100;
+        }
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = cg->st_merged_ops = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "merged_ops",
+            NULL,
+            "disk",
+            context,
+            title,
+            "operations/s",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+        rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+    }
+
+    rrddim_set(chart, "read", (collected_number)cg->io_merged.Read);
+    rrddim_set(chart, "write", (collected_number)cg->io_merged.Write);
+    rrdset_done(chart);
+}
+
+// Render PSI "some" CPU pressure share-time averages (10s/60s/300s windows).
+// Collected values are stored pre-multiplied by 100 and the dimensions use
+// divisor 100, yielding two-decimal percentages. Skipped for systemd
+// service cgroups.
+void update_cpu_some_pressure_chart(struct cgroup *cg) {
+    if (is_cgroup_systemd_service(cg))
+        return;
+
+    struct pressure *res = &cg->cpu_pressure;
+    struct pressure_charts *pcs = &res->some;
+    RRDSET *chart = pcs->share_time.st;
+
+    if (unlikely(!pcs->share_time.st)) {
+        char *title = "CPU some pressure";
+        char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure" : "cgroup.cpu_some_pressure";
+        int prio = cgroup_containers_chart_priority + 2200;
+
+        char buff[RRD_ID_LENGTH_MAX + 1];
+        chart = pcs->share_time.st = rrdset_create_localhost(
+            cgroup_chart_type(buff, cg),
+            "cpu_some_pressure",
+            NULL,
+            "cpu",
+            context,
+            title,
+            "percentage",
+            PLUGIN_CGROUPS_NAME,
+            is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+            prio,
+            cgroup_update_every,
+            RRDSET_TYPE_LINE);
+
+        rrdset_update_rrdlabels(chart, cg->chart_labels);
+        pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+        pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+        pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+    }
+
+    rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+    rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+    rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+    rrdset_done(chart);
+}
+
+// Publish the cgroup's CPU "some" pressure total stall time chart (ms, incremental).
+// Skipped for systemd service cgroups; the chart is created lazily on first call.
+void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->cpu_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "CPU some pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure_stall_time" : "cgroup.cpu_some_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2220;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "cpu_some_pressure_stall_time",
+ NULL,
+ "cpu",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's CPU "full" pressure averages chart.
+// Skipped for systemd service cgroups; the RRD set and dimensions are created lazily.
+void update_cpu_full_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->cpu_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "CPU full pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure" : "cgroup.cpu_full_pressure";
+ int prio = cgroup_containers_chart_priority + 2240;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "cpu_full_pressure",
+ NULL,
+ "cpu",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ // *100 with divisor 100 keeps two decimals of the percentage
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's CPU "full" pressure total stall time chart (ms, incremental).
+void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->cpu_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "CPU full pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure_stall_time" : "cgroup.cpu_full_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2260;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "cpu_full_pressure_stall_time",
+ NULL,
+ "cpu",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's memory "some" pressure averages chart.
+// Skipped for systemd service cgroups; created lazily on first call.
+void update_mem_some_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->memory_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "Memory some pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure" : "cgroup.memory_some_pressure";
+ int prio = cgroup_containers_chart_priority + 2300;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "mem_some_pressure",
+ NULL,
+ "mem",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's memory "some" pressure total stall time chart (ms, incremental).
+// NOTE(review): chart id uses the "memory_" prefix while the share-time chart
+// uses "mem_" - presumably legacy naming; confirm before unifying.
+void update_mem_some_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->memory_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "Memory some pressure stall time";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure_stall_time" :
+ "cgroup.memory_some_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2320;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "memory_some_pressure_stall_time",
+ NULL,
+ "mem",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's memory "full" pressure averages chart.
+void update_mem_full_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->memory_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "Memory full pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure" : "cgroup.memory_full_pressure";
+ int prio = cgroup_containers_chart_priority + 2340;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "mem_full_pressure",
+ NULL,
+ "mem",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's memory "full" pressure total stall time chart (ms, incremental).
+void update_mem_full_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->memory_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "Memory full pressure stall time";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure_stall_time" :
+ "cgroup.memory_full_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2360;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "memory_full_pressure_stall_time",
+ NULL,
+ "mem",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's IRQ "some" pressure averages chart (family "interrupts").
+void update_irq_some_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->irq_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "IRQ some pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure";
+ int prio = cgroup_containers_chart_priority + 2310;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "irq_some_pressure",
+ NULL,
+ "interrupts",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's IRQ "some" pressure total stall time chart (ms, incremental).
+void update_irq_some_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->irq_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "IRQ some pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2330;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "irq_some_pressure_stall_time",
+ NULL,
+ "interrupts",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's IRQ "full" pressure averages chart (family "interrupts").
+void update_irq_full_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->irq_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "IRQ full pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure";
+ int prio = cgroup_containers_chart_priority + 2350;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "irq_full_pressure",
+ NULL,
+ "interrupts",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's IRQ "full" pressure total stall time chart (ms, incremental).
+void update_irq_full_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->irq_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "IRQ full pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2370;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "irq_full_pressure_stall_time",
+ NULL,
+ "interrupts",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's I/O "some" pressure averages chart (family "disk").
+void update_io_some_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->io_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "I/O some pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure" : "cgroup.io_some_pressure";
+ int prio = cgroup_containers_chart_priority + 2400;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "io_some_pressure",
+ NULL,
+ "disk",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's I/O "some" pressure total stall time chart (ms, incremental).
+void update_io_some_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->io_pressure;
+ struct pressure_charts *pcs = &res->some;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "I/O some pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure_stall_time" : "cgroup.io_some_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2420;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "io_some_pressure_stall_time",
+ NULL,
+ "disk",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's I/O "full" pressure averages chart (family "disk").
+void update_io_full_pressure_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->io_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->share_time.st;
+
+ if (unlikely(!pcs->share_time.st)) {
+ char *title = "I/O full pressure";
+ char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure" : "cgroup.io_full_pressure";
+ int prio = cgroup_containers_chart_priority + 2440;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "io_full_pressure",
+ NULL,
+ "disk",
+ context,
+ title,
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
+ rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
+ rrdset_done(chart);
+}
+
+// Publish the cgroup's I/O "full" pressure total stall time chart (ms, incremental).
+void update_io_full_pressure_stall_time_chart(struct cgroup *cg) {
+ if (is_cgroup_systemd_service(cg))
+ return;
+
+ struct pressure *res = &cg->io_pressure;
+ struct pressure_charts *pcs = &res->full;
+ RRDSET *chart = pcs->total_time.st;
+
+ if (unlikely(!pcs->total_time.st)) {
+ char *title = "I/O full pressure stall time";
+ char *context =
+ k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure_stall_time" : "cgroup.io_full_pressure_stall_time";
+ int prio = cgroup_containers_chart_priority + 2460;
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "io_full_pressure_stall_time",
+ NULL,
+ "disk",
+ context,
+ title,
+ "ms",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
+ rrdset_done(chart);
+}
+
+// Publish the number-of-processes chart for a cgroup.
+// Unlike the pressure charts above, this one also handles systemd service
+// cgroups (different title/context/priority branch); created lazily.
+void update_pids_current_chart(struct cgroup *cg) {
+ RRDSET *chart = cg->st_pids;
+
+ if (unlikely(!cg->st_pids)) {
+ char *title;
+ char *context;
+ int prio;
+ if (is_cgroup_systemd_service(cg)) {
+ title = "Systemd Services Number of Processes";
+ context = "systemd.service.pids.current";
+ prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70;
+ } else {
+ title = "Number of processes";
+ context = k8s_is_kubepod(cg) ? "k8s.cgroup.pids_current" : "cgroup.pids_current";
+ prio = cgroup_containers_chart_priority + 2150;
+ }
+
+ char buff[RRD_ID_LENGTH_MAX + 1];
+ chart = cg->st_pids = rrdset_create_localhost(
+ cgroup_chart_type(buff, cg),
+ "pids_current",
+ NULL,
+ "pids",
+ context,
+ title,
+ "pids",
+ PLUGIN_CGROUPS_NAME,
+ is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
+ prio,
+ cgroup_update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdset_update_rrdlabels(chart, cg->chart_labels);
+ cg->st_pids_rd_pids_current = rrddim_add(chart, "pids", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(chart, cg->st_pids_rd_pids_current, (collected_number)cg->pids.pids_current);
+ rrdset_done(chart);
+}
diff --git a/collectors/cgroups.plugin/cgroup-discovery.c b/collectors/cgroups.plugin/cgroup-discovery.c
new file mode 100644
index 000000000..ede35ed8a
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-discovery.c
@@ -0,0 +1,1245 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "cgroup-internals.h"
+
+// discovery cgroup thread worker jobs
+#define WORKER_DISCOVERY_INIT 0
+#define WORKER_DISCOVERY_FIND 1
+#define WORKER_DISCOVERY_PROCESS 2
+#define WORKER_DISCOVERY_PROCESS_RENAME 3
+#define WORKER_DISCOVERY_PROCESS_NETWORK 4
+#define WORKER_DISCOVERY_PROCESS_FIRST_TIME 5
+#define WORKER_DISCOVERY_UPDATE 6
+#define WORKER_DISCOVERY_CLEANUP 7
+#define WORKER_DISCOVERY_COPY 8
+#define WORKER_DISCOVERY_SHARE 9
+#define WORKER_DISCOVERY_LOCK 10
+
+#if WORKER_UTILIZATION_MAX_JOB_TYPES < 11
+#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 11
+#endif
+
+struct cgroup *discovered_cgroup_root = NULL;
+
+char cgroup_chart_id_prefix[] = "cgroup_";
+char services_chart_id_prefix[] = "systemd_";
+char *cgroups_rename_script = NULL;
+
+
+// ----------------------------------------------------------------------------
+
+// Mark all four pressure charts of 'res' obsolete and free the cached source
+// filename. Note: the pointers inside 'res' are not reset to NULL, so the
+// struct must not be reused after this call.
+static inline void free_pressure(struct pressure *res) {
+ if (res->some.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.share_time.st);
+ if (res->some.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.total_time.st);
+ if (res->full.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.share_time.st);
+ if (res->full.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.total_time.st);
+ freez(res->filename);
+}
+
+// Free the cgroup's linked list of network interfaces, unregistering each
+// host device from the proc_net_dev rename machinery first.
+static inline void cgroup_free_network_interfaces(struct cgroup *cg) {
+ while(cg->interfaces) {
+ struct cgroup_network_interface *i = cg->interfaces;
+ cg->interfaces = i->next;
+
+ // delete the registration of proc_net_dev rename
+ netdev_rename_device_del(i->host_device);
+
+ freez((void *)i->host_device);
+ freez((void *)i->container_device);
+ freez((void *)i);
+ }
+}
+
+// Destroy a cgroup object completely: mark every chart obsolete, free all
+// cached filenames, network interfaces, per-cpu buffers, labels and identity
+// strings, free 'cg' itself, and decrement the global cgroup_root_count.
+// The caller is responsible for unlinking 'cg' from any list beforehand.
+static inline void cgroup_free(struct cgroup *cg) {
+ netdata_log_debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
+
+ cgroup_netdev_delete(cg);
+
+ // mark all charts obsolete so the main thread retires them safely
+ if(cg->st_cpu) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu);
+ if(cg->st_cpu_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit);
+ if(cg->st_cpu_per_core) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_per_core);
+ if(cg->st_cpu_nr_throttled) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_nr_throttled);
+ if(cg->st_cpu_throttled_time) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_throttled_time);
+ if(cg->st_cpu_shares) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_shares);
+ if(cg->st_mem) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem);
+ if(cg->st_writeback) rrdset_is_obsolete___safe_from_collector_thread(cg->st_writeback);
+ if(cg->st_mem_activity) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_activity);
+ if(cg->st_pgfaults) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pgfaults);
+ if(cg->st_mem_usage) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage);
+ if(cg->st_mem_usage_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit);
+ if(cg->st_mem_utilization) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization);
+ if(cg->st_mem_failcnt) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_failcnt);
+ if(cg->st_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_io);
+ if(cg->st_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_serviced_ops);
+ if(cg->st_throttle_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_io);
+ if(cg->st_throttle_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_serviced_ops);
+ if(cg->st_queued_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_queued_ops);
+ if(cg->st_merged_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_merged_ops);
+ if(cg->st_pids) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pids);
+
+ // cached limit/configuration file paths
+ freez(cg->filename_cpuset_cpus);
+ freez(cg->filename_cpu_cfs_period);
+ freez(cg->filename_cpu_cfs_quota);
+ freez(cg->filename_memory_limit);
+ freez(cg->filename_memoryswap_limit);
+
+ cgroup_free_network_interfaces(cg);
+
+ freez(cg->cpuacct_usage.cpu_percpu);
+
+ // collector source file paths
+ freez(cg->cpuacct_stat.filename);
+ freez(cg->cpuacct_usage.filename);
+ freez(cg->cpuacct_cpu_throttling.filename);
+ freez(cg->cpuacct_cpu_shares.filename);
+
+ arl_free(cg->memory.arl_base);
+ freez(cg->memory.filename_detailed);
+ freez(cg->memory.filename_failcnt);
+ freez(cg->memory.filename_usage_in_bytes);
+ freez(cg->memory.filename_msw_usage_in_bytes);
+
+ freez(cg->io_service_bytes.filename);
+ freez(cg->io_serviced.filename);
+
+ freez(cg->throttle_io_service_bytes.filename);
+ freez(cg->throttle_io_serviced.filename);
+
+ freez(cg->io_merged.filename);
+ freez(cg->io_queued.filename);
+ freez(cg->pids.pids_current_filename);
+
+ free_pressure(&cg->cpu_pressure);
+ free_pressure(&cg->io_pressure);
+ free_pressure(&cg->memory_pressure);
+ free_pressure(&cg->irq_pressure);
+
+ // identity strings and labels
+ freez(cg->id);
+ freez(cg->intermediate_id);
+ freez(cg->chart_id);
+ freez(cg->name);
+
+ rrdlabels_destroy(cg->chart_labels);
+
+ freez(cg);
+
+ cgroup_root_count--;
+}
+
+// ----------------------------------------------------------------------------
+// add/remove/find cgroup objects
+
+#define CGROUP_CHARTID_LINE_MAX 1024
+
+// Duplicate a cgroup id for use as a chart id: NULL/"" maps to "/", a single
+// leading '/' is dropped (kept only for the root cgroup itself), and the copy
+// is sanitized with netdata_fix_chart_id(). Caller owns the returned string.
+static inline char *cgroup_chart_id_strdupz(const char *s) {
+ if(!s || !*s) s = "/";
+
+ if(*s == '/' && s[1] != '\0') s++;
+
+ char *r = strdupz(s);
+ netdata_fix_chart_id(r);
+
+ return r;
+}
+
+// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed
+// Replace every '.' with '-' in-place in the given chart id.
+static inline void substitute_dots_in_id(char *s) {
+ // dots are used to distinguish chart type and id in streaming, so we should replace them
+ for (char *d = s; *d; d++) {
+ if (*d == '.')
+ *d = '-';
+ }
+}
+
+// ----------------------------------------------------------------------------
+// parse k8s labels
+
+// Parse a rename-script result line of the form "name key=value,key=value,...".
+// Mutates 'data' in place (strsep-style tokenization) and adds each pair to
+// 'labels' with RRDLABEL_SRC_AUTO|RRDLABEL_SRC_K8S. Returns a pointer INTO
+// 'data' (the name token) - the caller must not free it separately.
+char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data) {
+ // the first word, up to the first space is the name
+ char *name = strsep_skip_consecutive_separators(&data, " ");
+
+ // the rest are key=value pairs separated by comma
+ while(data) {
+ char *pair = strsep_skip_consecutive_separators(&data, ",");
+ rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S);
+ }
+
+ return name;
+}
+
+// Run the external rename script once for this cgroup and, on success, adopt
+// the resolved name, chart id and k8s labels it prints on its first line.
+// Consumes one of cg->pending_renames per call; retries on other exit codes
+// until the budget is exhausted.
+static inline void discovery_rename_cgroup(struct cgroup *cg) {
+ if (!cg->pending_renames) {
+ return;
+ }
+ cg->pending_renames--;
+
+ netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
+ netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id);
+ pid_t cgroup_pid;
+
+ FILE *fp_child_input, *fp_child_output;
+ (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_rename_script, cg->id, cg->intermediate_id);
+ if (!fp_child_output) {
+ collector_error("CGROUP: cannot popen(%s \"%s\", \"r\").", cgroups_rename_script, cg->intermediate_id);
+ cg->pending_renames = 0;
+ cg->processed = 1;
+ return;
+ }
+
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp_child_output);
+ int exit_code = netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
+
+ switch (exit_code) {
+ // 0: script resolved the name - stop retrying and use its output below
+ case 0:
+ cg->pending_renames = 0;
+ break;
+
+ // 3: stop retrying AND mark the cgroup processed (script output is
+ // ignored) - presumably "leave this cgroup as-is"; see cgroup-name.sh
+ case 3:
+ cg->pending_renames = 0;
+ cg->processed = 1;
+ break;
+
+ // any other exit code: keep the remaining retry budget for next cycle
+ default:
+ break;
+ }
+
+ if (cg->pending_renames || cg->processed)
+ return;
+ if (!new_name || !*new_name || *new_name == '\n')
+ return;
+ if (!(new_name = trim(new_name)))
+ return;
+
+ if (!cg->chart_labels)
+ cg->chart_labels = rrdlabels_create();
+ // read the new labels and remove the obsolete ones
+ rrdlabels_unmark_all(cg->chart_labels);
+ char *name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name);
+ rrdlabels_remove_all_unmarked(cg->chart_labels);
+
+ freez(cg->name);
+ cg->name = strdupz(name);
+
+ freez(cg->chart_id);
+ cg->chart_id = cgroup_chart_id_strdupz(name);
+
+ substitute_dots_in_id(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
+}
+
+// Fill out->path with the first existing "cgroup.procs" file for this cgroup id,
+// probing the cpuset, blkio, memory and devices controller mount points in that
+// order (cgroup v1 layout). If none exists, clears the path and disables the entry.
+static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) {
+ struct stat buf;
+
+ snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id);
+ if (likely(stat(out->path, &buf) == 0)) {
+ return;
+ }
+
+ snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id);
+ if (likely(stat(out->path, &buf) == 0)) {
+ return;
+ }
+
+ snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id);
+ if (likely(stat(out->path, &buf) == 0)) {
+ return;
+ }
+
+ snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id);
+ if (likely(stat(out->path, &buf) == 0)) {
+ return;
+ }
+
+ out->path[0] = '\0';
+ out->enabled = 0;
+}
+
+// Re-label a cgroup as a systemd service: derive name/chart id from the last
+// path component of cg->id with its extension (e.g. ".service") stripped,
+// and set CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE.
+// NOTE(review): 'len' is size_t; when no '/' (or no '.') is present the
+// while(len--) loop leaves len == SIZE_MAX, so "if (len)" is true and
+// s[len + 1] wraps back to s[0]. This works only via unsigned wraparound -
+// confirm intent before touching this logic.
+static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) {
+ char buffer[CGROUP_CHARTID_LINE_MAX + 1];
+ cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
+ strncpyz(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
+ char *s = buffer;
+
+ // skip to the last slash
+ size_t len = strlen(s);
+ while (len--) {
+ if (unlikely(s[len] == '/')) {
+ break;
+ }
+ }
+ if (len) {
+ s = &s[len + 1];
+ }
+
+ // remove extension
+ len = strlen(s);
+ while (len--) {
+ if (unlikely(s[len] == '.')) {
+ break;
+ }
+ }
+ if (len) {
+ s[len] = '\0';
+ }
+
+ freez(cg->name);
+ cg->name = strdupz(s);
+
+ freez(cg->chart_id);
+ cg->chart_id = cgroup_chart_id_strdupz(s);
+ substitute_dots_in_id(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
+}
+
+static inline struct cgroup *discovery_cgroup_add(const char *id) {
+ netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
+
+ struct cgroup *cg = callocz(1, sizeof(struct cgroup));
+
+ cg->id = strdupz(id);
+ cg->hash = simple_hash(cg->id);
+
+ cg->name = strdupz(id);
+
+ cg->intermediate_id = cgroup_chart_id_strdupz(id);
+
+ cg->chart_id = cgroup_chart_id_strdupz(id);
+ substitute_dots_in_id(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
+
+ if (cgroup_use_unified_cgroups) {
+ cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
+ }
+
+ if (!discovered_cgroup_root)
+ discovered_cgroup_root = cg;
+ else {
+ struct cgroup *t;
+ for (t = discovered_cgroup_root; t->discovered_next; t = t->discovered_next) {
+ }
+ t->discovered_next = cg;
+ }
+
+ return cg;
+}
+
+static inline struct cgroup *discovery_cgroup_find(const char *id) {
+ netdata_log_debug(D_CGROUP, "searching for cgroup '%s'", id);
+
+ uint32_t hash = simple_hash(id);
+
+ struct cgroup *cg;
+ for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
+ if(hash == cg->hash && strcmp(id, cg->id) == 0)
+ break;
+ }
+
+ netdata_log_debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
+ return cg;
+}
+
// Depth of a cgroup id: the number of '/' separators it contains.
static int calc_cgroup_depth(const char *id) {
    int slashes = 0;
    for (const char *p = id; *p; p++) {
        if (*p == '/')
            slashes++;
    }
    return slashes;
}
+
// Directory-walk callback: register (or re-mark as available) the cgroup whose
// hierarchy-relative path is `dir`. Enforces the configured maximum cgroup
// count and maximum depth before adding new cgroups.
static inline void discovery_find_cgroup_in_dir_callback(const char *dir) {
    // the hierarchy root is reported as an empty string - normalize to "/"
    if (!dir || !*dir) {
        dir = "/";
    }

    netdata_log_debug(D_CGROUP, "examining cgroup dir '%s'", dir);

    // already known: just flag it available for this discovery cycle
    struct cgroup *cg = discovery_cgroup_find(dir);
    if (cg) {
        cg->available = 1;
        return;
    }

    // refuse to grow past the configured limit (rate-limited warning)
    if (cgroup_root_count >= cgroup_root_max) {
        nd_log_limit_static_global_var(erl, 3600, 0);
        nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, "CGROUP: maximum number of cgroups reached (%d). No more cgroups will be added.", cgroup_root_count);
        return;
    }

    // optional depth filter (number of '/' in the relative path)
    if (cgroup_max_depth > 0) {
        int depth = calc_cgroup_depth(dir);
        if (depth > cgroup_max_depth) {
            nd_log_collector(NDLP_DEBUG, "CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
            return;
        }
    }

    // brand-new cgroup: queued for first-time processing by discovery
    cg = discovery_cgroup_add(dir);
    cg->available = 1;
    cg->first_time_seen = 1;
    cg->function_ready = false;
    cgroup_root_count++;
}
+
+static inline int discovery_find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
+ if(!this) this = base;
+ netdata_log_debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base);
+
+ size_t dirlen = strlen(this), baselen = strlen(base);
+
+ int ret = -1;
+ int enabled = -1;
+
+ const char *relative_path = &this[baselen];
+ if(!*relative_path) relative_path = "/";
+
+ DIR *dir = opendir(this);
+ if(!dir) {
+ collector_error("CGROUP: cannot read directory '%s'", base);
+ return ret;
+ }
+ ret = 1;
+
+ callback(relative_path);
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR
+ && (
+ (de->d_name[0] == '.' && de->d_name[1] == '\0')
+ || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
+ ))
+ continue;
+
+ if(de->d_type == DT_DIR) {
+ if(enabled == -1) {
+ const char *r = relative_path;
+ if(*r == '\0') r = "/";
+
+ // do not decent in directories we are not interested
+ enabled = matches_search_cgroup_paths(r);
+ }
+
+ if(enabled) {
+ char *s = mallocz(dirlen + strlen(de->d_name) + 2);
+ strcpy(s, this);
+ strcat(s, "/");
+ strcat(s, de->d_name);
+ int ret2 = discovery_find_dir_in_subdirs(base, s, callback);
+ if(ret2 > 0) ret += ret2;
+ freez(s);
+ }
+ }
+ }
+
+ closedir(dir);
+ return ret;
+}
+
+static inline void discovery_mark_as_unavailable_all_cgroups() {
+ for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
+ cg->available = 0;
+ }
+}
+
// Resolve and cache the cgroup v1 controller file paths for this cgroup.
// Each metric file is probed with stat() only while its filename is still
// unset, so already-resolved files are not re-probed on later cycles.
// Blkio files prefer the "*_recursive" variant and fall back to the flat one.
static inline void discovery_update_filenames_cgroup_v1(struct cgroup *cg) {
    char filename[FILENAME_MAX + 1];
    struct stat buf;

    // CPU
    if (unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_stat.filename = strdupz(filename);
            cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
            // limit-related files are cached without stat()ing them here;
            // readers handle their absence
            snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id);
            cg->filename_cpuset_cpus = strdupz(filename);
            snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id);
            cg->filename_cpu_cfs_period = strdupz(filename);
            snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id);
            cg->filename_cpu_cfs_quota = strdupz(filename);
        }
    }
    // FIXME: remove usage_percpu
    if (unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !is_cgroup_systemd_service(cg))) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_usage.filename = strdupz(filename);
            cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
        }
    }
    if (unlikely(
            cgroup_enable_cpuacct_cpu_throttling && !cg->cpuacct_cpu_throttling.filename &&
            !is_cgroup_systemd_service(cg))) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_cpuacct_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_cpu_throttling.filename = strdupz(filename);
            cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
        }
    }
    if (unlikely(
            cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename && !is_cgroup_systemd_service(cg))) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpu.shares", cgroup_cpuacct_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_cpu_shares.filename = strdupz(filename);
            cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
        }
    }

    // Memory
    // detailed memory is collected for services only when explicitly enabled,
    // or when used-memory charts need memory.stat contents
    if (unlikely(
            (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed &&
            (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_detailed = strdupz(filename);
            cg->memory.enabled_detailed =
                (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO;
        }
    }
    if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_usage_in_bytes = strdupz(filename);
            cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
            snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id);
            cg->filename_memory_limit = strdupz(filename);
        }
    }
    if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
            cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
            snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id);
            cg->filename_memoryswap_limit = strdupz(filename);
        }
    }
    if (unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_failcnt = strdupz(filename);
            cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
        }
    }

    // Blkio
    // prefer the *_recursive files; fall back to the non-recursive variants
    if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->io_service_bytes.filename = strdupz(filename);
            cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->io_service_bytes.filename = strdupz(filename);
                cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
            }
        }
    }
    if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->io_serviced.filename = strdupz(filename);
            cg->io_serviced.enabled = cgroup_enable_blkio_ops;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->io_serviced.filename = strdupz(filename);
                cg->io_serviced.enabled = cgroup_enable_blkio_ops;
            }
        }
    }
    if (unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->throttle_io_service_bytes.filename = strdupz(filename);
            cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->throttle_io_service_bytes.filename = strdupz(filename);
                cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
            }
        }
    }
    if (unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->throttle_io_serviced.filename = strdupz(filename);
            cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->throttle_io_serviced.filename = strdupz(filename);
                cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
            }
        }
    }
    if (unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->io_merged.filename = strdupz(filename);
            cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->io_merged.filename = strdupz(filename);
                cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
            }
        }
    }
    if (unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued_recursive", cgroup_blkio_base, cg->id);
        if (unlikely(stat(filename, &buf) != -1)) {
            cg->io_queued.filename = strdupz(filename);
            cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
        } else {
            snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
            if (likely(stat(filename, &buf) != -1)) {
                cg->io_queued.filename = strdupz(filename);
                cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
            }
        }
    }

    // Pids
    if (unlikely(!cg->pids.pids_current_filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_pids_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->pids.pids_current_filename = strdupz(filename);
        }
    }
}
+
// Resolve and cache the cgroup v2 (unified hierarchy) file paths for this
// cgroup. Like the v1 variant, files are probed with stat() only while their
// filenames are unset. v2 additionally exposes PSI (pressure) files.
static inline void discovery_update_filenames_cgroup_v2(struct cgroup *cg) {
    char filename[FILENAME_MAX + 1];
    struct stat buf;

    // CPU
    // cpu.stat serves both the basic stats and throttling counters in v2
    if (unlikely((cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_cpu_throttling) && !cg->cpuacct_stat.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_stat.filename = strdupz(filename);
            cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
            cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
            // no cpuset/cfs_period equivalents in v2; cpu.max replaces cfs_quota
            cg->filename_cpuset_cpus = NULL;
            cg->filename_cpu_cfs_period = NULL;
            snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id);
            cg->filename_cpu_cfs_quota = strdupz(filename);
        }
    }
    if (unlikely(cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpu.weight", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpuacct_cpu_shares.filename = strdupz(filename);
            cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
        }
    }

    // Memory
    // FIXME: this if condition!
    if (unlikely(
            (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed &&
            (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_detailed = strdupz(filename);
            cg->memory.enabled_detailed =
                (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO;
        }
    }

    if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_usage_in_bytes = strdupz(filename);
            cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
            snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id);
            cg->filename_memory_limit = strdupz(filename);
        }
    }

    if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
            cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
            snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id);
            cg->filename_memoryswap_limit = strdupz(filename);
        }
    }

    // Blkio
    // io.stat carries both bytes and operations in v2; both metric families
    // point at the same file
    if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->io_service_bytes.filename = strdupz(filename);
            cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
        }
    }

    if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->io_serviced.filename = strdupz(filename);
            cg->io_serviced.enabled = cgroup_enable_blkio_ops;
        }
    }

    // PSI
    if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->cpu_pressure.filename = strdupz(filename);
            cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu;
            // "full" CPU pressure is intentionally not collected here
            cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO;
        }
    }

    if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->io_pressure.filename = strdupz(filename);
            cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some;
            cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full;
        }
    }

    if (unlikely(
            (cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) &&
            !cg->memory_pressure.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->memory_pressure.filename = strdupz(filename);
            cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some;
            cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full;
        }
    }

    if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->irq_pressure.filename = strdupz(filename);
            cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some;
            cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full;
        }
    }

    // Pids
    if (unlikely(!cg->pids.pids_current_filename)) {
        snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_unified_base, cg->id);
        if (likely(stat(filename, &buf) != -1)) {
            cg->pids.pids_current_filename = strdupz(filename);
        }
    }
}
+
+static inline void discovery_update_filenames_all_cgroups() {
+ for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
+ if (unlikely(!cg->available || !cg->enabled || cg->pending_renames))
+ continue;
+
+ if (!cgroup_use_unified_cgroups)
+ discovery_update_filenames_cgroup_v1(cg);
+ else if (likely(cgroup_unified_exist))
+ discovery_update_filenames_cgroup_v2(cg);
+ }
+}
+
// Remove every cgroup that is no longer available from the discovered list
// and free it. Before deleting a cgroup, re-enable any duplicate that was
// previously disabled because it shared this cgroup's chart id.
static inline void discovery_cleanup_all_cgroups() {
    struct cgroup *cg = discovered_cgroup_root, *last = NULL;

    for(; cg ;) {
        if(!cg->available) {
            // enable the first duplicate cgroup
            {
                struct cgroup *t;
                for (t = discovered_cgroup_root; t; t = t->discovered_next) {
                    // same kind (service vs regular) and same chart id as the
                    // cgroup being removed
                    if (t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE &&
                        (is_cgroup_systemd_service(t) == is_cgroup_systemd_service(cg)) &&
                        t->hash_chart_id == cg->hash_chart_id && !strcmp(t->chart_id, cg->chart_id)) {
                        netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
                        t->enabled = 1;
                        t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
                        break;
                    }
                }
            }

            // unlink cg: `last` is the previous surviving node (NULL at head)
            if(!last)
                discovered_cgroup_root = cg->discovered_next;
            else
                last->discovered_next = cg->discovered_next;

            cgroup_free(cg);

            // continue from the node that replaced cg in the list
            if(!last)
                cg = discovered_cgroup_root;
            else
                cg = last->discovered_next;
        }
        else {
            last = cg;
            cg = cg->discovered_next;
        }
    }
}
+
+static inline void discovery_copy_discovered_cgroups_to_reader() {
+ netdata_log_debug(D_CGROUP, "copy discovered cgroups to the main group list");
+
+ struct cgroup *cg;
+
+ for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
+ cg->next = cg->discovered_next;
+ }
+
+ cgroup_root = discovered_cgroup_root;
+}
+
+static inline void discovery_share_cgroups_with_ebpf() {
+ struct cgroup *cg;
+ int count;
+ struct stat buf;
+
+ if (shm_mutex_cgroup_ebpf == SEM_FAILED) {
+ return;
+ }
+ sem_wait(shm_mutex_cgroup_ebpf);
+
+ for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) {
+ netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count];
+ char *prefix = (is_cgroup_systemd_service(cg)) ? services_chart_id_prefix : cgroup_chart_id_prefix;
+ snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_id);
+ ptr->hash = simple_hash(ptr->name);
+ ptr->options = cg->options;
+ ptr->enabled = cg->enabled;
+ if (cgroup_use_unified_cgroups) {
+ snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id);
+ if (likely(stat(ptr->path, &buf) == -1)) {
+ ptr->path[0] = '\0';
+ ptr->enabled = 0;
+ }
+ } else {
+ is_cgroup_procs_exist(ptr, cg->id);
+ }
+
+ netdata_log_debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled);
+ }
+
+ shm_cgroup_ebpf.header->cgroup_root_count = count;
+ sem_post(shm_mutex_cgroup_ebpf);
+}
+
// Walk every v1 controller hierarchy whose metrics are enabled, registering
// each directory via the callback. If a hierarchy root cannot be read, all
// metric families that depend on it are disabled so we do not retry forever.
static inline void discovery_find_all_cgroups_v1() {
    if (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
        if (discovery_find_dir_in_subdirs(cgroup_cpuacct_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
            cgroup_enable_cpuacct_stat = cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
            collector_error("CGROUP: disabled cpu statistics.");
        }
    }

    if (cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io ||
        cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
        if (discovery_find_dir_in_subdirs(cgroup_blkio_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
            cgroup_enable_blkio_io = cgroup_enable_blkio_ops = cgroup_enable_blkio_throttle_io =
                cgroup_enable_blkio_throttle_ops = cgroup_enable_blkio_merged_ops = cgroup_enable_blkio_queued_ops =
                    CONFIG_BOOLEAN_NO;
            collector_error("CGROUP: disabled blkio statistics.");
        }
    }

    if (cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
        if (discovery_find_dir_in_subdirs(cgroup_memory_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
            cgroup_enable_memory = cgroup_enable_detailed_memory = cgroup_enable_swap = cgroup_enable_memory_failcnt =
                CONFIG_BOOLEAN_NO;
            collector_error("CGROUP: disabled memory statistics.");
        }
    }

    if (cgroup_search_in_devices) {
        if (discovery_find_dir_in_subdirs(cgroup_devices_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
            cgroup_search_in_devices = 0;
            collector_error("CGROUP: disabled devices statistics.");
        }
    }
}
+
+static inline void discovery_find_all_cgroups_v2() {
+ if (discovery_find_dir_in_subdirs(cgroup_unified_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
+ cgroup_unified_exist = CONFIG_BOOLEAN_NO;
+ collector_error("CGROUP: disabled unified cgroups statistics.");
+ }
+}
+
// Return 1 when `s` consists only of decimal digits, 0 otherwise.
// An empty string returns 0 (the first byte tested is the terminator).
static int is_digits_only(const char *s) {
    do {
        // cast to unsigned char: passing a negative char value (possible when
        // plain char is signed) to isdigit() is undefined behavior
        if (!isdigit((unsigned char)*s++)) {
            return 0;
        }
    } while (*s);

    return 1;
}
+
// Heuristic: does this cgroup id look like a Kubernetes container cgroup?
// Requires at least two "pod" substrings and, after the last one, a '/'
// followed by a non-empty container segment.
// examples:
// https://github.com/netdata/netdata/blob/0fc101679dcd12f1cb8acdd07bb4c85d8e553e53/collectors/cgroups.plugin/cgroup-name.sh#L121-L147
static int is_cgroup_k8s_container(const char *id) {
    const char *cursor = id;
    const char *after_last_pod = NULL;
    int pod_count = 0;

    while ((cursor = strstr(cursor, "pod")) != NULL) {
        pod_count++;
        cursor += 3; // strlen("pod")
        after_last_pod = cursor;
    }

    if (pod_count < 2 || !after_last_pod)
        return 0;

    const char *slash = strchr(after_last_pod, '/');
    if (!slash || !slash[1])
        return 0;

    return 1;
}
+
#define TASK_COMM_LEN 16 // kernel limit for /proc/<pid>/comm (incl. terminator)

// For a k8s container cgroup, fetch the comm of the first PID listed in its
// cgroup.procs into `comm` (buffer of at least TASK_COMM_LEN + 1 bytes).
// Returns 0 on success, 1 on any failure or when `id` is not a k8s container.
// NOTE(review): the static procfile handle makes this non-reentrant - fine if
// only the discovery thread calls it; confirm.
static int k8s_get_container_first_proc_comm(const char *id, char *comm) {
    if (!is_cgroup_k8s_container(id)) {
        return 1;
    }

    static procfile *ff = NULL;

    char filename[FILENAME_MAX + 1];
    snprintfz(filename, FILENAME_MAX, "%s/%s/cgroup.procs", cgroup_cpuacct_base, id);

    ff = procfile_reopen(ff, filename, NULL, CGROUP_PROCFILE_FLAG);
    if (unlikely(!ff)) {
        netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
        return 1;
    }

    ff = procfile_readall(ff);
    if (unlikely(!ff)) {
        netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
        return 1;
    }

    // need at least one PID line (presumably plus the trailing empty line)
    unsigned long lines = procfile_lines(ff);
    if (likely(lines < 2)) {
        return 1;
    }

    // first word of the first line is the first PID in the cgroup
    char *pid = procfile_lineword(ff, 0, 0);
    if (!pid || !*pid) {
        return 1;
    }

    snprintfz(filename, FILENAME_MAX, "%s/proc/%s/comm", netdata_configured_host_prefix, pid);

    ff = procfile_reopen(ff, filename, NULL, PROCFILE_FLAG_DEFAULT);
    if (unlikely(!ff)) {
        netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
        return 1;
    }

    ff = procfile_readall(ff);
    if (unlikely(!ff)) {
        netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
        return 1;
    }

    lines = procfile_lines(ff);
    if (unlikely(lines != 2)) {
        return 1;
    }

    char *proc_comm = procfile_lineword(ff, 0, 0);
    if (!proc_comm || !*proc_comm) {
        return 1;
    }

    strncpyz(comm, proc_comm, TASK_COMM_LEN);
    return 0;
}
+
// One-time classification of a newly discovered cgroup: detect its
// orchestrator, skip k8s pause/init containers, convert systemd services,
// and schedule the rename script for cgroups that match the rename filter.
// May re-arm first_time_seen to retry on the next discovery cycle.
static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) {
    if (!cg->first_time_seen) {
        return;
    }
    cg->first_time_seen = 0;

    char comm[TASK_COMM_LEN + 1];

    if (cg->container_orchestrator == CGROUPS_ORCHESTRATOR_UNSET) {
        if (strstr(cg->id, "kubepods")) {
            cg->container_orchestrator = CGROUPS_ORCHESTRATOR_K8S;
        } else {
            cg->container_orchestrator = CGROUPS_ORCHESTRATOR_UNKNOWN;
        }
    }

    if (is_inside_k8s && !k8s_get_container_first_proc_comm(cg->id, comm)) {
        // container initialization may take some time when CPU % is high
        // seen on GKE: comm is '6' before 'runc:[2:INIT]' (dunno if it could be another number)
        if (is_digits_only(comm) || matches_entrypoint_parent_process_comm(comm)) {
            cg->first_time_seen = 1; // retry on the next discovery cycle
            return;
        }
        if (!strcmp(comm, "pause")) {
            // a container that holds the network namespace for the pod
            // we don't need to collect its metrics
            cg->processed = 1;
            return;
        }
    }

    if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) {
        netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_id);
        convert_cgroup_to_systemd_service(cg);
        return;
    }

    if (matches_enabled_cgroup_renames(cg->id)) {
        netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_id);
        if (is_inside_k8s && is_cgroup_k8s_container(cg->id)) {
            // it may take up to a minute for the K8s API to return data for the container
            // tested on AWS K8s cluster with 100% CPU utilization
            cg->pending_renames = 9; // 1.5 minute
        } else {
            cg->pending_renames = 2;
        }
    }
}
+
+static int discovery_is_cgroup_duplicate(struct cgroup *cg) {
+ // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
+ struct cgroup *c;
+ for (c = discovered_cgroup_root; c; c = c->discovered_next) {
+ if (c != cg && c->enabled && (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg)) &&
+ c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id)) {
+ collector_error(
+ "CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id,
+ c->id,
+ cg->id);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// cgroup network interfaces
+
#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048 // max length of one script output line

// Run the cgroup-network helper script for this cgroup and record every
// "host_iface guest_iface" pair it prints. Each pair is prepended to
// cg->interfaces and registered with proc_net_dev for device renaming.
static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
    netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);

    pid_t cgroup_pid;
    char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];

    // the script takes the absolute cgroup directory: v1 uses the cpuacct
    // hierarchy, v2 the unified one
    if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
        snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_cpuacct_base, cg->id);
    }
    else {
        snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_unified_base, cg->id);
    }

    netdata_log_debug(D_CGROUP, "executing cgroup_identifier %s --cgroup '%s' for cgroup '%s'", cgroups_network_interface_script, cgroup_identifier, cg->id);
    FILE *fp_child_input, *fp_child_output;
    (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_network_interface_script, "--cgroup", cgroup_identifier);
    if(!fp_child_output) {
        collector_error("CGROUP: cannot popen(%s --cgroup \"%s\", \"r\").", cgroups_network_interface_script, cgroup_identifier);
        return;
    }

    char *s;
    char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
    while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) {
        // NOTE(review): trim()'s return value (pointer past leading space) is
        // discarded - leading whitespace would remain in `s`; presumably the
        // script never emits it. Confirm.
        trim(s);

        if(*s && *s != '\n') {
            // split "host guest" at the first space
            char *t = s;
            while(*t && *t != ' ') t++;
            if(*t == ' ') {
                *t = '\0';
                t++;
            }

            if(!*s) {
                collector_error("CGROUP: empty host interface returned by script");
                continue;
            }

            if(!*t) {
                collector_error("CGROUP: empty guest interface returned by script");
                continue;
            }

            // prepend the pair to the cgroup's interface list
            struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
            i->host_device = strdupz(s);
            i->container_device = strdupz(t);
            i->next = cg->interfaces;
            cg->interfaces = i;

            collector_info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);

            // register a device rename to proc_net_dev.c
            netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id, cg->chart_labels,
                                     k8s_is_kubepod(cg) ? "k8s." : "", cgroup_netdev_get(cg));
        }
    }

    netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
    // netdata_log_debug(D_CGROUP, "closed cgroup_identifier for cgroup '%s'", cg->id);
}
+
// Drive a single cgroup through the discovery pipeline: first-time handling,
// pending renames, enablement filters, duplicate detection, chart labels and
// network interface lookup. Sets cg->processed once fully handled.
static inline void discovery_process_cgroup(struct cgroup *cg) {
    if (!cg->available || cg->processed) {
        return;
    }

    if (cg->first_time_seen) {
        worker_is_busy(WORKER_DISCOVERY_PROCESS_FIRST_TIME);
        discovery_process_first_time_seen_cgroup(cg);
        // first-time handling may defer (retry next cycle) or finish the cgroup
        if (unlikely(cg->first_time_seen || cg->processed)) {
            return;
        }
    }

    if (cg->pending_renames) {
        worker_is_busy(WORKER_DISCOVERY_PROCESS_RENAME);
        discovery_rename_cgroup(cg);
        if (unlikely(cg->pending_renames || cg->processed)) {
            return;
        }
    }

    cg->processed = 1;

    if ((strlen(cg->chart_id) + strlen(cgroup_chart_id_prefix)) >= RRD_ID_LENGTH_MAX) {
        collector_info("cgroup '%s' (chart id '%s') disabled because chart_id exceeds the limit (RRD_ID_LENGTH_MAX)", cg->id, cg->chart_id);
        return;
    }

    // systemd services: only duplicate detection applies, no path/name filters
    if (is_cgroup_systemd_service(cg)) {
        if (discovery_is_cgroup_duplicate(cg)) {
            cg->enabled = 0;
            cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
            return;
        }
        if (!cg->chart_labels)
            cg->chart_labels = rrdlabels_create();
        rrdlabels_add(cg->chart_labels, "service_name", cg->name, RRDLABEL_SRC_AUTO);
        cg->enabled = 1;
        return;
    }

    // user-configured name and path filters decide enablement
    if (!(cg->enabled = matches_enabled_cgroup_names(cg->name))) {
        netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->name);
        return;
    }

    if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) {
        netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->name);
        return;
    }

    if (discovery_is_cgroup_duplicate(cg)) {
        cg->enabled = 0;
        cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
        return;
    }

    if (!cg->chart_labels)
        cg->chart_labels = rrdlabels_create();

    // kubepods get their labels from the k8s rename path instead
    if (!k8s_is_kubepod(cg)) {
        rrdlabels_add(cg->chart_labels, "cgroup_name", cg->name, RRDLABEL_SRC_AUTO);
        if (!rrdlabels_exist(cg->chart_labels, "image"))
            rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO);
    }

    worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK);
    read_cgroup_network_interfaces(cg);
}
+
// One full discovery cycle: mark everything unavailable, re-scan the
// filesystem, process each cgroup, refresh filenames, then - under the
// cgroup_root_mutex - prune dead cgroups and publish the list to the reader.
// Finally share the result with the eBPF plugin (outside the lock).
static inline void discovery_find_all_cgroups() {
    netdata_log_debug(D_CGROUP, "searching for cgroups");

    worker_is_busy(WORKER_DISCOVERY_INIT);
    discovery_mark_as_unavailable_all_cgroups();

    worker_is_busy(WORKER_DISCOVERY_FIND);
    if (!cgroup_use_unified_cgroups) {
        discovery_find_all_cgroups_v1();
    } else {
        discovery_find_all_cgroups_v2();
    }

    for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
        worker_is_busy(WORKER_DISCOVERY_PROCESS);
        discovery_process_cgroup(cg);
    }

    worker_is_busy(WORKER_DISCOVERY_UPDATE);
    discovery_update_filenames_all_cgroups();

    worker_is_busy(WORKER_DISCOVERY_LOCK);
    uv_mutex_lock(&cgroup_root_mutex);

    worker_is_busy(WORKER_DISCOVERY_CLEANUP);
    discovery_cleanup_all_cgroups();

    worker_is_busy(WORKER_DISCOVERY_COPY);
    discovery_copy_discovered_cgroups_to_reader();

    uv_mutex_unlock(&cgroup_root_mutex);

    worker_is_busy(WORKER_DISCOVERY_SHARE);
    discovery_share_cgroups_with_ebpf();

    netdata_log_debug(D_CGROUP, "done searching for cgroups");
}
+
// Entry point of the cgroup discovery thread. Registers worker job names,
// then sleeps on discovery_thread.cond_var and runs one discovery cycle per
// wake-up until collectors shut down. Sets discovery_thread.exited on exit.
void cgroup_discovery_worker(void *ptr)
{
    UNUSED(ptr);

    worker_register("CGROUPSDISC");
    worker_register_job_name(WORKER_DISCOVERY_INIT, "init");
    worker_register_job_name(WORKER_DISCOVERY_FIND, "find");
    worker_register_job_name(WORKER_DISCOVERY_PROCESS, "process");
    worker_register_job_name(WORKER_DISCOVERY_PROCESS_RENAME, "rename");
    worker_register_job_name(WORKER_DISCOVERY_PROCESS_NETWORK, "network");
    worker_register_job_name(WORKER_DISCOVERY_PROCESS_FIRST_TIME, "new");
    worker_register_job_name(WORKER_DISCOVERY_UPDATE, "update");
    worker_register_job_name(WORKER_DISCOVERY_CLEANUP, "cleanup");
    worker_register_job_name(WORKER_DISCOVERY_COPY, "copy");
    worker_register_job_name(WORKER_DISCOVERY_SHARE, "share");
    worker_register_job_name(WORKER_DISCOVERY_LOCK, "lock");

    // parent-process comms that indicate a container is still initializing
    entrypoint_parent_process_comm = simple_pattern_create(
        " runc:[* " // http://terenceli.github.io/%E6%8A%80%E6%9C%AF/2021/12/28/runc-internals-3)
        " exe ", // https://github.com/falcosecurity/falco/blob/9d41b0a151b83693929d3a9c84f7c5c85d070d3a/rules/falco_rules.yaml#L1961
        NULL,
        SIMPLE_PATTERN_EXACT, true);

    service_register(SERVICE_THREAD_TYPE_LIBUV, NULL, NULL, NULL, false);

    while (service_running(SERVICE_COLLECTORS)) {
        worker_is_idle();

        // wait until the main thread signals that a discovery run is wanted
        uv_mutex_lock(&discovery_thread.mutex);
        uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex);
        uv_mutex_unlock(&discovery_thread.mutex);

        if (unlikely(!service_running(SERVICE_COLLECTORS)))
            break;

        discovery_find_all_cgroups();
    }
    collector_info("discovery thread stopped");
    worker_unregister();
    service_exits();
    __atomic_store_n(&discovery_thread.exited,1,__ATOMIC_RELAXED);
}
diff --git a/collectors/cgroups.plugin/cgroup-internals.h b/collectors/cgroups.plugin/cgroup-internals.h
new file mode 100644
index 000000000..a69802240
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-internals.h
@@ -0,0 +1,514 @@
+#include "sys_fs_cgroup.h"
+
+#ifndef NETDATA_CGROUP_INTERNALS_H
+#define NETDATA_CGROUP_INTERNALS_H 1
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_DEFAULT
+#else
+#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_NO_ERROR_ON_FILE_IO
+#endif
+
+// State for one blkio accounting file (e.g. blkio.io_service_bytes):
+// collection flags, the resolved file path, and the last Read/Write totals.
+struct blkio {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int delay_counter; // countdown before re-reading a file that kept reporting zeros
+
+ char *filename;
+
+ unsigned long long Read;
+ unsigned long long Write;
+/*
+ unsigned long long Sync;
+ unsigned long long Async;
+ unsigned long long Total;
+*/
+};
+
+// State for the pids controller: path to pids.current and its last value.
+struct pids {
+ char *pids_current_filename;
+ int pids_current_updated;
+ unsigned long long pids_current;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+// Memory controller state for one cgroup: ARL parser handles for memory.stat,
+// per-file updated/enabled flags, resolved file paths, and the last values read.
+// Fields kept only as comments are parsed but currently unused.
+struct memory {
+ ARL_BASE *arl_base;
+ ARL_ENTRY *arl_dirty;
+ ARL_ENTRY *arl_swap;
+
+ int updated_detailed;
+ int updated_usage_in_bytes;
+ int updated_msw_usage_in_bytes;
+ int updated_failcnt;
+
+ int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+ int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ int delay_counter_detailed;
+ int delay_counter_failcnt;
+
+ char *filename_detailed;
+ char *filename_usage_in_bytes;
+ char *filename_msw_usage_in_bytes;
+ char *filename_failcnt;
+
+ int detailed_has_dirty;
+ int detailed_has_swap;
+
+ // detailed metrics
+/*
+ unsigned long long cache;
+ unsigned long long rss;
+ unsigned long long rss_huge;
+ unsigned long long mapped_file;
+ unsigned long long writeback;
+ unsigned long long dirty;
+ unsigned long long swap;
+ unsigned long long pgpgin;
+ unsigned long long pgpgout;
+ unsigned long long pgfault;
+ unsigned long long pgmajfault;
+ unsigned long long inactive_anon;
+ unsigned long long active_anon;
+ unsigned long long inactive_file;
+ unsigned long long active_file;
+ unsigned long long unevictable;
+ unsigned long long hierarchical_memory_limit;
+*/
+ //unified cgroups metrics
+ unsigned long long anon;
+ unsigned long long kernel_stack;
+ unsigned long long slab;
+ unsigned long long sock;
+ // unsigned long long shmem;
+ unsigned long long anon_thp;
+ //unsigned long long file_writeback;
+ //unsigned long long file_dirty;
+ //unsigned long long file;
+
+ unsigned long long total_cache;
+ unsigned long long total_rss;
+ unsigned long long total_rss_huge;
+ unsigned long long total_mapped_file;
+ unsigned long long total_writeback;
+ unsigned long long total_dirty;
+ unsigned long long total_swap;
+ unsigned long long total_pgpgin;
+ unsigned long long total_pgpgout;
+ unsigned long long total_pgfault;
+ unsigned long long total_pgmajfault;
+/*
+ unsigned long long total_inactive_anon;
+ unsigned long long total_active_anon;
+*/
+
+ unsigned long long total_inactive_file;
+
+/*
+ unsigned long long total_active_file;
+ unsigned long long total_unevictable;
+*/
+
+ // single file metrics
+ unsigned long long usage_in_bytes;
+ unsigned long long msw_usage_in_bytes;
+ unsigned long long failcnt;
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+// Aggregate CPU time split into user/system, from cpuacct.stat (v1)
+// or the user_usec/system_usec fields of cpu.stat (v2).
+struct cpuacct_stat {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned long long user; // v1, v2(user_usec)
+ unsigned long long system; // v1, v2(system_usec)
+};
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
+// Per-core CPU usage from cpuacct.usage_percpu: 'cpus' entries in 'cpu_percpu'.
+struct cpuacct_usage {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned int cpus; // number of entries allocated in cpu_percpu
+ unsigned long long *cpu_percpu;
+};
+
+// represents cpuacct/cpu.stat, for v2 'cpuacct_stat' is used for 'user_usec', 'system_usec'
+// CFS throttling counters from cpu.stat; nr_throttled_perc is derived
+// (share of periods that were throttled), not read from the file.
+struct cpuacct_cpu_throttling {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned long long nr_periods;
+ unsigned long long nr_throttled;
+ unsigned long long throttled_time;
+
+ unsigned long long nr_throttled_perc;
+};
+
+// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu#sect-cfs
+// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications_managing-monitoring-and-updating-the-kernel#proc_controlling-distribution-of-cpu-time-for-applications-by-adjusting-cpu-weight_using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications
+// Relative CPU weight: cpu.shares (v1) or cpu.weight (v2).
+struct cpuacct_cpu_shares {
+ int updated;
+ int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
+
+ char *filename;
+
+ unsigned long long shares;
+};
+
+// Singly-linked list node mapping a container's network interface to the
+// corresponding host-side device (resolved by the cgroup-network helper).
+struct cgroup_network_interface {
+ const char *host_device;
+ const char *container_device;
+ struct cgroup_network_interface *next;
+};
+
+// Which orchestrator (if any) manages a cgroup; UNSET until discovery decides.
+enum cgroups_container_orchestrator {
+ CGROUPS_ORCHESTRATOR_UNSET,
+ CGROUPS_ORCHESTRATOR_UNKNOWN,
+ CGROUPS_ORCHESTRATOR_K8S
+};
+
+
+// *** WARNING *** The fields are not thread safe. Take care of safe usage.
+// Everything the plugin tracks for one monitored cgroup: identity/naming,
+// per-controller accounting state, the RRD charts/dimensions it feeds, and
+// links into both the active list (next) and the discoverer's list
+// (discovered_next). Not thread safe (see the warning above).
+struct cgroup {
+ uint32_t options;
+
+ int first_time_seen; // first time seen by the discoverer
+ int processed; // the discoverer is done processing a cgroup (resolved name, set 'enabled' option)
+
+ char available; // found in the filesystem
+ char enabled; // enabled in the config
+
+ bool function_ready; // true after the first iteration of chart creation/update
+
+ char pending_renames; // rename-script retries still outstanding
+
+ char *id;
+ uint32_t hash;
+
+ char *intermediate_id; // TODO: remove it when the renaming script is fixed
+
+ char *chart_id;
+ uint32_t hash_chart_id;
+
+ // 'cgroup_name' label value.
+ // by default this is the *id (path), later changed to the resolved name (cgroup-name.sh) or systemd service name.
+ char *name;
+
+ RRDLABELS *chart_labels;
+
+ int container_orchestrator; // enum cgroups_container_orchestrator
+
+ // per-controller accounting state
+ struct cpuacct_stat cpuacct_stat;
+ struct cpuacct_usage cpuacct_usage;
+ struct cpuacct_cpu_throttling cpuacct_cpu_throttling;
+ struct cpuacct_cpu_shares cpuacct_cpu_shares;
+
+ struct memory memory;
+
+ struct blkio io_service_bytes; // bytes
+ struct blkio io_serviced; // operations
+
+ struct blkio throttle_io_service_bytes; // bytes
+ struct blkio throttle_io_serviced; // operations
+
+ struct blkio io_merged; // operations
+ struct blkio io_queued; // operations
+
+ struct pids pids;
+
+ struct cgroup_network_interface *interfaces;
+
+ struct pressure cpu_pressure;
+ struct pressure io_pressure;
+ struct pressure memory_pressure;
+ struct pressure irq_pressure;
+
+ // Cpu
+ RRDSET *st_cpu;
+ RRDDIM *st_cpu_rd_user;
+ RRDDIM *st_cpu_rd_system;
+
+ RRDSET *st_cpu_limit;
+ RRDSET *st_cpu_per_core;
+ RRDSET *st_cpu_nr_throttled;
+ RRDSET *st_cpu_throttled_time;
+ RRDSET *st_cpu_shares;
+
+ // Memory
+ RRDSET *st_mem;
+ RRDDIM *st_mem_rd_ram;
+ RRDDIM *st_mem_rd_swap;
+
+ RRDSET *st_mem_utilization;
+ RRDSET *st_writeback;
+ RRDSET *st_mem_activity;
+ RRDSET *st_pgfaults;
+ RRDSET *st_mem_usage;
+ RRDSET *st_mem_usage_limit;
+ RRDSET *st_mem_failcnt;
+
+ // Blkio
+ RRDSET *st_io;
+ RRDDIM *st_io_rd_read;
+ RRDDIM *st_io_rd_written;
+
+ RRDSET *st_serviced_ops;
+
+ RRDSET *st_throttle_io;
+ RRDDIM *st_throttle_io_rd_read;
+ RRDDIM *st_throttle_io_rd_written;
+
+ RRDSET *st_throttle_serviced_ops;
+
+ RRDSET *st_queued_ops;
+ RRDSET *st_merged_ops;
+
+ // Pids
+ RRDSET *st_pids;
+ RRDDIM *st_pids_rd_pids_current;
+
+ // per cgroup chart variables
+ char *filename_cpuset_cpus;
+ unsigned long long cpuset_cpus;
+
+ char *filename_cpu_cfs_period;
+ unsigned long long cpu_cfs_period;
+
+ char *filename_cpu_cfs_quota;
+ unsigned long long cpu_cfs_quota;
+
+ const RRDSETVAR_ACQUIRED *chart_var_cpu_limit;
+ NETDATA_DOUBLE prev_cpu_usage;
+
+ char *filename_memory_limit;
+ unsigned long long memory_limit;
+ const RRDSETVAR_ACQUIRED *chart_var_memory_limit;
+
+ char *filename_memoryswap_limit;
+ unsigned long long memoryswap_limit;
+ const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit;
+
+ const DICTIONARY_ITEM *cgroup_netdev_link;
+
+ struct cgroup *next;
+ struct cgroup *discovered_next;
+
+};
+
+// Handle and wake-up machinery of the discovery thread; the main thread
+// signals cond_var (see cgroup_discovery_worker) and polls 'exited' on shutdown.
+struct discovery_thread {
+ uv_thread_t thread;
+ uv_mutex_t mutex;
+ uv_cond_t cond_var;
+ int exited; // set to 1 (atomically) by the worker when it terminates
+};
+
+extern struct discovery_thread discovery_thread;
+
+// helper script / chart-id prefixes, and the lock protecting cgroup_root
+extern char *cgroups_rename_script;
+extern char cgroup_chart_id_prefix[];
+extern char services_chart_id_prefix[];
+extern uv_mutex_t cgroup_root_mutex;
+
+void cgroup_discovery_worker(void *ptr);
+
+// global plugin configuration (per-feature enable switches, limits, paths);
+// defined in sys_fs_cgroup.c
+extern int is_inside_k8s;
+extern long system_page_size;
+extern int cgroup_enable_cpuacct_stat;
+extern int cgroup_enable_cpuacct_usage;
+extern int cgroup_enable_cpuacct_cpu_throttling;
+extern int cgroup_enable_cpuacct_cpu_shares;
+extern int cgroup_enable_memory;
+extern int cgroup_enable_detailed_memory;
+extern int cgroup_enable_memory_failcnt;
+extern int cgroup_enable_swap;
+extern int cgroup_enable_blkio_io;
+extern int cgroup_enable_blkio_ops;
+extern int cgroup_enable_blkio_throttle_io;
+extern int cgroup_enable_blkio_throttle_ops;
+extern int cgroup_enable_blkio_merged_ops;
+extern int cgroup_enable_blkio_queued_ops;
+extern int cgroup_enable_pressure_cpu;
+extern int cgroup_enable_pressure_io_some;
+extern int cgroup_enable_pressure_io_full;
+extern int cgroup_enable_pressure_memory_some;
+extern int cgroup_enable_pressure_memory_full;
+extern int cgroup_enable_pressure_irq_some;
+extern int cgroup_enable_pressure_irq_full;
+extern int cgroup_enable_systemd_services;
+extern int cgroup_enable_systemd_services_detailed_memory;
+extern int cgroup_used_memory;
+extern int cgroup_use_unified_cgroups;
+extern int cgroup_unified_exist;
+extern int cgroup_search_in_devices;
+extern int cgroup_check_for_new_every;
+extern int cgroup_update_every;
+extern int cgroup_containers_chart_priority;
+extern int cgroup_recheck_zero_blkio_every_iterations;
+extern int cgroup_recheck_zero_mem_failcnt_every_iterations;
+extern int cgroup_recheck_zero_mem_detailed_every_iterations;
+// mount points / base directories of the individual cgroup controllers
+extern char *cgroup_cpuacct_base;
+extern char *cgroup_cpuset_base;
+extern char *cgroup_blkio_base;
+extern char *cgroup_memory_base;
+extern char *cgroup_pids_base;
+extern char *cgroup_devices_base;
+extern char *cgroup_unified_base;
+extern int cgroup_root_count;
+extern int cgroup_root_max;
+extern int cgroup_max_depth;
+// user-configurable filtering patterns (see the matches_* helpers below)
+extern SIMPLE_PATTERN *enabled_cgroup_paths;
+extern SIMPLE_PATTERN *enabled_cgroup_names;
+extern SIMPLE_PATTERN *search_cgroup_paths;
+extern SIMPLE_PATTERN *enabled_cgroup_renames;
+extern SIMPLE_PATTERN *systemd_services_cgroups;
+extern SIMPLE_PATTERN *entrypoint_parent_process_comm;
+extern char *cgroups_network_interface_script;
+extern int cgroups_check;
+// precomputed simple hashes of the keywords parsed from the accounting files
+extern uint32_t Read_hash;
+extern uint32_t Write_hash;
+extern uint32_t user_hash;
+extern uint32_t system_hash;
+extern uint32_t user_usec_hash;
+extern uint32_t system_usec_hash;
+extern uint32_t nr_periods_hash;
+extern uint32_t nr_throttled_hash;
+extern uint32_t throttled_time_hash;
+extern uint32_t throttled_usec_hash;
+extern struct cgroup *cgroup_root;
+
+// shared memory used to export discovered cgroups to the eBPF plugin
+extern netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf;
+extern int shm_fd_cgroup_ebpf;
+extern sem_t *shm_mutex_cgroup_ebpf;
+
+// detected cgroup hierarchy flavor on this host
+enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 };
+
+// how systemd mounts its cgroup hierarchy (from systemd configuration)
+enum cgroups_systemd_setting {
+ SYSTEMD_CGROUP_ERR,
+ SYSTEMD_CGROUP_LEGACY,
+ SYSTEMD_CGROUP_HYBRID,
+ SYSTEMD_CGROUP_UNIFIED
+};
+
+// maps a systemd config option name to the hierarchy setting it implies
+struct cgroups_systemd_config_setting {
+ char *name;
+ enum cgroups_systemd_setting setting;
+};
+
+extern struct cgroups_systemd_config_setting cgroups_systemd_options[];
+
+// True when the cgroup path matches the user's "enable by path" patterns.
+static inline int matches_enabled_cgroup_paths(char *id) {
+ return simple_pattern_matches(enabled_cgroup_paths, id);
+}
+
+// True when the resolved cgroup name matches the user's "enable by name" patterns.
+static inline int matches_enabled_cgroup_names(char *name) {
+ return simple_pattern_matches(enabled_cgroup_names, name);
+}
+
+// True when the cgroup id should be run through the renaming script.
+static inline int matches_enabled_cgroup_renames(char *id) {
+ return simple_pattern_matches(enabled_cgroup_renames, id);
+}
+
+// True when the cgroup id should be treated as a systemd service cgroup.
+static inline int matches_systemd_services_cgroups(char *id) {
+ return simple_pattern_matches(systemd_services_cgroups, id);
+}
+
+// True when the directory should be descended into while searching for cgroups.
+static inline int matches_search_cgroup_paths(const char *dir) {
+ return simple_pattern_matches(search_cgroup_paths, dir);
+}
+
+// True when 'comm' looks like a container entrypoint's parent (runc, "exe", ...).
+static inline int matches_entrypoint_parent_process_comm(const char *comm) {
+ return simple_pattern_matches(entrypoint_parent_process_comm, comm);
+}
+
+// Non-zero when the cgroup was flagged as a systemd .service slice.
+static inline int is_cgroup_systemd_service(struct cgroup *cg) {
+ return (int)(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE);
+}
+
+// Non-zero when discovery classified this cgroup as Kubernetes-managed.
+static inline int k8s_is_kubepod(struct cgroup *cg) {
+ return cg->container_orchestrator == CGROUPS_ORCHESTRATOR_K8S;
+}
+
+// Writes the chart "type" (chart-id prefix) for 'cg' into 'buffer' and returns it.
+// NOTE(review): buffer is assumed to hold at least RRD_ID_LENGTH_MAX + 1 bytes
+// (snprintfz/strncpy are bounded by RRD_ID_LENGTH_MAX) — confirm at call sites.
+static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
+ buffer[0] = '\0';
+
+ // the root cgroup (empty id or "/") gets a fixed name
+ if (cg->chart_id[0] == '\0' || (cg->chart_id[0] == '/' && cg->chart_id[1] == '\0'))
+ strncpy(buffer, "cgroup_root", RRD_ID_LENGTH_MAX);
+ else if (is_cgroup_systemd_service(cg))
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id);
+ else
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", cgroup_chart_id_prefix, cg->chart_id);
+
+ return buffer;
+}
+
+#define RRDFUNCTIONS_CGTOP_HELP "View running containers"
+
+// "top"-style netdata functions (implemented in cgroup-top.c) that list
+// running containers / systemd services on demand through the functions API.
+int cgroup_function_cgroup_top(BUFFER *wb, int timeout, const char *function, void *collector_data,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data);
+int cgroup_function_systemd_top(BUFFER *wb, int timeout, const char *function, void *collector_data,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data);
+
+// network-device linkage between cgroups and host interfaces
+void cgroup_netdev_link_init(void);
+const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg);
+void cgroup_netdev_delete(struct cgroup *cg);
+
+// chart updaters (implemented in cgroup-charts.c) — one per chart family
+void update_cpu_utilization_chart(struct cgroup *cg);
+void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit);
+void update_cpu_throttled_chart(struct cgroup *cg);
+void update_cpu_throttled_duration_chart(struct cgroup *cg);
+void update_cpu_shares_chart(struct cgroup *cg);
+void update_cpu_per_core_usage_chart(struct cgroup *cg);
+
+void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit);
+void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit);
+void update_mem_usage_detailed_chart(struct cgroup *cg);
+void update_mem_writeback_chart(struct cgroup *cg);
+void update_mem_activity_chart(struct cgroup *cg);
+void update_mem_pgfaults_chart(struct cgroup *cg);
+void update_mem_failcnt_chart(struct cgroup *cg);
+void update_mem_usage_chart(struct cgroup *cg);
+
+void update_io_serviced_bytes_chart(struct cgroup *cg);
+void update_io_serviced_ops_chart(struct cgroup *cg);
+void update_throttle_io_serviced_bytes_chart(struct cgroup *cg);
+void update_throttle_io_serviced_ops_chart(struct cgroup *cg);
+void update_io_queued_ops_chart(struct cgroup *cg);
+void update_io_merged_ops_chart(struct cgroup *cg);
+
+void update_pids_current_chart(struct cgroup *cg);
+
+// PSI (pressure stall information) charts: some/full share + stall time
+void update_cpu_some_pressure_chart(struct cgroup *cg);
+void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg);
+void update_cpu_full_pressure_chart(struct cgroup *cg);
+void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg);
+
+void update_mem_some_pressure_chart(struct cgroup *cg);
+void update_mem_some_pressure_stall_time_chart(struct cgroup *cg);
+void update_mem_full_pressure_chart(struct cgroup *cg);
+void update_mem_full_pressure_stall_time_chart(struct cgroup *cg);
+
+void update_irq_some_pressure_chart(struct cgroup *cg);
+void update_irq_some_pressure_stall_time_chart(struct cgroup *cg);
+void update_irq_full_pressure_chart(struct cgroup *cg);
+void update_irq_full_pressure_stall_time_chart(struct cgroup *cg);
+
+void update_io_some_pressure_chart(struct cgroup *cg);
+void update_io_some_pressure_stall_time_chart(struct cgroup *cg);
+void update_io_full_pressure_chart(struct cgroup *cg);
+void update_io_full_pressure_stall_time_chart(struct cgroup *cg);
+
+#endif // NETDATA_CGROUP_INTERNALS_H \ No newline at end of file
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
deleted file mode 100755
index c0f3d0cb6..000000000
--- a/collectors/cgroups.plugin/cgroup-name.sh
+++ /dev/null
@@ -1,659 +0,0 @@
-#!/usr/bin/env bash
-#shellcheck disable=SC2001
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Script to find a better name for cgroups
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
-export LC_ALL=C
-
-# -----------------------------------------------------------------------------
-
-# Program identity and log filtering: NETDATA_LOG_SEVERITY_LEVEL
-# (info|warn|warning|err|error, case-insensitive) selects the minimum level.
-PROGRAM_NAME="$(basename "${0}")"
-
-LOG_LEVEL_ERR=1
-LOG_LEVEL_WARN=2
-LOG_LEVEL_INFO=3
-LOG_LEVEL="$LOG_LEVEL_INFO"
-
-set_log_severity_level() {
- case ${NETDATA_LOG_SEVERITY_LEVEL,,} in
- "info") LOG_LEVEL="$LOG_LEVEL_INFO";;
- "warn" | "warning") LOG_LEVEL="$LOG_LEVEL_WARN";;
- "err" | "error") LOG_LEVEL="$LOG_LEVEL_ERR";;
- esac
-}
-
-set_log_severity_level
-
-# Timestamp for log lines, e.g. "2024-03-09 13:19:48".
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-# Emit one log line to stderr: "<timestamp>: <program>: <STATUS>: <message>".
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-# Log at INFO level, suppressed when LOG_LEVEL is below info.
-info() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_INFO" -gt "$LOG_LEVEL" ]] && return
- log INFO "${@}"
-}
-
-# Log at WARNING level, suppressed when LOG_LEVEL is below warn.
-warning() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_WARN" -gt "$LOG_LEVEL" ]] && return
- log WARNING "${@}"
-}
-
-# Log at ERROR level, suppressed when LOG_LEVEL is below err.
-error() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_ERR" -gt "$LOG_LEVEL" ]] && return
- log ERROR "${@}"
-}
-
-# Log FATAL (never filtered) and terminate the script with exit status 1.
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-# Parse `docker inspect`-style "VAR=value" output: sets NAME (Nomad-style
-# namespace-job-task-alloc when the NOMAD_* vars are all present, otherwise the
-# container name with its leading '/' stripped) and LABELS (image, if known).
-# NOTE(review): eval of grep-filtered env lines assumes the values contain no
-# shell metacharacters — inherited behavior, verify upstream.
-function parse_docker_like_inspect_output() {
- local output="${1}"
- eval "$(grep -E "^(NOMAD_NAMESPACE|NOMAD_JOB_NAME|NOMAD_TASK_NAME|NOMAD_SHORT_ALLOC_ID|CONT_NAME|IMAGE_NAME)=" <<<"$output")"
- if [ -n "$NOMAD_NAMESPACE" ] && [ -n "$NOMAD_JOB_NAME" ] && [ -n "$NOMAD_TASK_NAME" ] && [ -n "$NOMAD_SHORT_ALLOC_ID" ]; then
- NAME="${NOMAD_NAMESPACE}-${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}-${NOMAD_SHORT_ALLOC_ID}"
- else
- NAME=$(echo "${CONT_NAME}" | sed 's|^/||')
- fi
- if [ -n "${IMAGE_NAME}" ]; then
- LABELS="image=\"${IMAGE_NAME}\""
- fi
-}
-
-# Resolve a container name via a docker/podman CLI `inspect`; always returns 0.
-# NOTE(review): the info message omits the IMAGE_NAME part of the actual
-# format string — cosmetic mismatch only.
-function docker_like_get_name_command() {
- local command="${1}"
- local id="${2}"
- info "Running command: ${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}' \"${id}\""
- if OUTPUT="$(${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}{{println}}IMAGE_NAME={{ .Config.Image}}' "${id}")" &&
- [ -n "$OUTPUT" ]; then
- parse_docker_like_inspect_output "$OUTPUT"
- fi
- return 0
-}
-
-# Resolve a container name via the docker/podman HTTP API (unix socket or TCP).
-# $1 is the NAME of the env var holding the endpoint (indirect expansion),
-# $2 is the container id. Returns 1 when the endpoint or jq is unavailable.
-function docker_like_get_name_api() {
- local host_var="${1}"
- local host="${!host_var}"
- local path="/containers/${2}/json"
- if [ -z "${host}" ]; then
- warning "No ${host_var} is set"
- return 1
- fi
- if ! command -v jq >/dev/null 2>&1; then
- warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
- return 1
- fi
- if [ -S "${host}" ]; then
- info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
- JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
- else
- info "Running API command: curl \"${host}${path}\""
- JSON=$(curl -sS "${host}${path}")
- fi
- if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
- parse_docker_like_inspect_output "$OUTPUT"
- fi
- return 0
-}
-
-# get_lbl_val returns the value for the label with the given name.
-# Returns "null" string if the label doesn't exist.
-# Expected labels format: 'name="value",...'.
-function get_lbl_val() {
- local labels want_name
- labels="${1}"
- want_name="${2}"
-
- IFS=, read -ra labels <<< "$labels"
-
- local lname lval
- for l in "${labels[@]}"; do
- IFS="=" read -r lname lval <<< "$l"
- if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
- echo "${lval:1:-1}" # trim "
- return 0
- fi
- done
-
- echo "null"
- return 1
-}
-
-# Prepend $2 to each label name in the 'name="value",...' list $1,
-# e.g. add_lbl_prefix 'a="1",b="2"' k8s_ -> 'k8s_a="1",k8s_b="2"'.
-function add_lbl_prefix() {
- local orig_labels prefix
- orig_labels="${1}"
- prefix="${2}"
-
- IFS=, read -ra labels <<< "$orig_labels"
-
- local new_labels
- for l in "${labels[@]}"; do
- new_labels+="${prefix}${l},"
- done
-
- echo "${new_labels:0:-1}" # trim last ','
-}
-
-# Remove the label named $2 from the 'name="value",...' list $1 and
-# print the remaining list.
-function remove_lbl() {
- local orig_labels lbl_name
- orig_labels="${1}"
- lbl_name="${2}"
-
- IFS=, read -ra labels <<< "$orig_labels"
-
- local new_labels
- for l in "${labels[@]}"; do
- IFS="=" read -r lname lval <<< "$l"
- [ "$lbl_name" != "$lname" ] && new_labels+="${l},"
- done
-
- echo "${new_labels:0:-1}" # trim last ','
-}
-
-# Success (0) when the cgroup at $1 contains exactly one process whose comm
-# is "pause" — i.e. it is a k8s sandbox/pause container not worth charting.
-function k8s_is_pause_container() {
- local cgroup_path="${1}"
-
- local file
- if [ -d "${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct" ]; then
- file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct/$cgroup_path/cgroup.procs"
- else
- file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/$cgroup_path/cgroup.procs"
- fi
-
- [ ! -f "$file" ] && return 1
-
- local procs
- IFS= read -rd' ' procs 2>/dev/null <"$file"
- #shellcheck disable=SC2206
- procs=($procs)
-
- [ "${#procs[@]}" -ne 1 ] && return 1
-
- IFS= read -r comm 2>/dev/null <"/proc/${procs[0]}/comm"
-
- [ "$comm" == "pause" ]
- return
-}
-
-# Query the GCP metadata server and print a GKE-style cluster name
-# "gke_<project>_<location>_<cluster>"; returns 1 when not on GCP or
-# any metadata field is missing (3s timeout per request, proxies bypassed).
-function k8s_gcp_get_cluster_name() {
- local header url id loc name
- header="Metadata-Flavor: Google"
- url="http://metadata/computeMetadata/v1"
- if id=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/project/project-id") &&
- loc=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-location") &&
- name=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-name") &&
- [ -n "$id" ] && [ -n "$loc" ] && [ -n "$name" ]; then
- echo "gke_${id}_${loc}_${name}"
- return 0
- fi
- return 1
-}
-
-# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
-# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
-# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
-# Return codes: 0 = name resolved (printed to stdout, optionally followed by a
-# space and a label list), 1 = permanent failure, 2 = retry later (metadata may
-# be stale), 3 = skip (pause container / nothing extractable).
-function k8s_get_kubepod_name() {
- # GKE /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
- # |-- kubepods
- # | |-- burstable
- # | | |-- pod98cee708-023b-11eb-933d-42010a800193
- # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
- # | `-- pode314bbac-d577-11ea-a171-42010a80013b
- # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
- #
- # GKE /sys/fs/cgroup/*/ (cri=containerd, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad.slice
- # | | | |-- cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be.scope
- # | `-- kubepods-pod91f5b561_369f_4103_8015_66391059996a.slice
- # | |-- cri-containerd-24c53b774a586f06abc058619b47f71d9d869ac50c92898adbd199106fd0aaeb.scope
- #
- # GKE /sys/fs/cgroup/*/ (cri=crio, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-podad412dfe_3589_4056_965a_592356172968.slice
- # | | | |-- crio-77b019312fd9825828b70214b2c94da69c30621af2a7ee06f8beace4bc9439e5.scope
- #
- # Minikube (v1.8.2) /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
- # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
- #
- # kind v0.14.0
- # |-- kubelet.slice
- # | |-- kubelet-kubepods.slice
- # | | |-- kubelet-kubepods-besteffort.slice
- # | | | |-- kubelet-kubepods-besteffort-pod7881ed9e_c63e_4425_b5e0_ac55a08ae939.slice
- # | | | | |-- cri-containerd-00c7939458bffc416bb03451526e9fde13301d6654cfeadf5b4964a7fb5be1a9.scope
- #
- # NOTE: cgroups plugin
- # - uses '_' to join dir names (so it is <parent>_<child>_<child>_...)
- # - replaces '.' with '-'
-
- local fn="${FUNCNAME[0]}"
- local cgroup_path="${1}"
- local id="${2}"
-
- if [[ ! $id =~ ^.*kubepods.* ]]; then
- warning "${fn}: '${id}' is not kubepod cgroup."
- return 1
- fi
-
- local clean_id="$id"
- clean_id=${clean_id//.slice/}
- clean_id=${clean_id//.scope/}
-
- # classify the cgroup id: top-level/QOS-class cgroup, container, or pod
- local name pod_uid cntr_id
- if [[ $clean_id == "kubepods" ]]; then
- name="$clean_id"
- elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
- # kubepods_<QOS_CLASS>
- # kubepods_kubepods-<QOS_CLASS>
- name=${clean_id//-/_}
- name=${name/#kubepods_kubepods/kubepods}
- elif [[ $clean_id =~ .+pod[a-f0-9_-]+_(docker|crio|cri-containerd)-([a-f0-9]+)$ ]]; then
- # ...pod<POD_UID>_(docker|crio|cri-containerd)-<CONTAINER_ID> (POD_UID w/ "_")
- cntr_id=${BASH_REMATCH[2]}
- elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
- # ...pod<POD_UID>_<CONTAINER_ID>
- cntr_id=${BASH_REMATCH[1]}
- elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
- # ...pod<POD_UID> (POD_UID w/ and w/o "_")
- pod_uid=${BASH_REMATCH[1]}
- pod_uid=${pod_uid//_/-}
- fi
-
- if [ -n "$name" ]; then
- echo "$name"
- return 0
- fi
-
- if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
- warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
- return 3
- fi
-
- [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
- [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
-
- if [ -n "$cntr_id" ] && k8s_is_pause_container "$cgroup_path"; then
- return 3
- fi
-
- if ! command -v jq > /dev/null 2>&1; then
- warning "${fn}: 'jq' command not available."
- return 1
- fi
-
- # pod/container metadata is cached in TMPDIR between invocations
- local tmp_kube_cluster_name="${TMPDIR:-"/tmp"}/netdata-cgroups-k8s-cluster-name"
- local tmp_kube_system_ns_uid_file="${TMPDIR:-"/tmp"}/netdata-cgroups-kubesystem-uid"
- local tmp_kube_containers_file="${TMPDIR:-"/tmp"}/netdata-cgroups-containers"
-
- local kube_cluster_name
- local kube_system_uid
- local labels
-
- if [ -n "$cntr_id" ] &&
- [ -f "$tmp_kube_cluster_name" ] &&
- [ -f "$tmp_kube_system_ns_uid_file" ] &&
- [ -f "$tmp_kube_containers_file" ] &&
- labels=$(grep "$cntr_id" "$tmp_kube_containers_file" 2>/dev/null); then
- # cache hit: reuse the cached cluster id/name
- IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
- IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
- else
- # cache miss: query the k8s API (in-cluster via service account, kubelet,
- # or kubectl on the host) and rebuild the container metadata cache
- IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
- IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
- [ -z "$kube_cluster_name" ] && ! kube_cluster_name=$(k8s_gcp_get_cluster_name) && kube_cluster_name="unknown"
-
- local kube_system_ns
- local pods
-
- if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
- local token header host url
- token="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
- header="Authorization: Bearer $token"
- host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
-
- if [ -z "$kube_system_uid" ]; then
- url="https://$host/api/v1/namespaces/kube-system"
- # FIX: check HTTP response code
- if ! kube_system_ns=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
- warning "${fn}: error on curl '${url}': ${kube_system_ns}."
- fi
- fi
-
- local url
- if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
- url="${KUBELET_URL:-https://localhost:10250}/pods"
- else
- url="https://$host/api/v1/pods"
- [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
- fi
-
- # FIX: check HTTP response code
- if ! pods=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
- warning "${fn}: error on curl '${url}': ${pods}."
- return 1
- fi
- elif ps -C kubelet >/dev/null 2>&1 && command -v kubectl >/dev/null 2>&1; then
- if [ -z "$kube_system_uid" ]; then
- if ! kube_system_ns=$(kubectl --kubeconfig="$KUBE_CONFIG" get namespaces kube-system -o json 2>&1); then
- warning "${fn}: error on 'kubectl': ${kube_system_ns}."
- fi
- fi
-
- [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
- if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
- warning "${fn}: error on 'kubectl': ${pods}."
- return 1
- fi
- else
- warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
- return 1
- fi
-
- if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<<"$kube_system_ns" 2>&1); then
- warning "${fn}: error on 'jq' parse kube_system_ns: ${kube_system_uid}."
- fi
-
- # flatten the pod list into one "label=value,..." line per container
- local jq_filter
- jq_filter+='.items[] | "'
- jq_filter+='namespace=\"\(.metadata.namespace)\",'
- jq_filter+='pod_name=\"\(.metadata.name)\",'
- jq_filter+='pod_uid=\"\(.metadata.uid)\",'
- #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
- jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
- jq_filter+='node_name=\"\(.spec.nodeName)\",'
- jq_filter+='" + '
- jq_filter+='(.status.containerStatuses[]? | "'
- jq_filter+='container_name=\"\(.name)\",'
- jq_filter+='container_id=\"\(.containerID)\"'
- jq_filter+='") | '
- jq_filter+='sub("(docker|cri-o|containerd)://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
-
- local containers
- if ! containers=$(jq -r "${jq_filter}" <<<"$pods" 2>&1); then
- warning "${fn}: error on 'jq' parse pods: ${containers}."
- return 1
- fi
-
- [ -n "$kube_cluster_name" ] && echo "$kube_cluster_name" >"$tmp_kube_cluster_name" 2>/dev/null
- [ -n "$kube_system_ns" ] && [ -n "$kube_system_uid" ] && echo "$kube_system_uid" >"$tmp_kube_system_ns_uid_file" 2>/dev/null
- echo "$containers" >"$tmp_kube_containers_file" 2>/dev/null
- fi
-
- local qos_class
- if [[ $clean_id =~ .+(besteffort|burstable) ]]; then
- qos_class="${BASH_REMATCH[1]}"
- else
- qos_class="guaranteed"
- fi
-
- # available labels:
- # namespace, pod_name, pod_uid, container_name, container_id, node_name
- if [ -n "$cntr_id" ]; then
- if [ -n "$labels" ] || labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
- labels+=',kind="container"'
- labels+=",qos_class=\"$qos_class\""
- [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
- [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
- name="cntr"
- name+="_$(get_lbl_val "$labels" namespace)"
- name+="_$(get_lbl_val "$labels" pod_name)"
- name+="_$(get_lbl_val "$labels" container_name)"
- labels=$(remove_lbl "$labels" "container_id")
- labels=$(remove_lbl "$labels" "pod_uid")
- labels=$(add_lbl_prefix "$labels" "k8s_")
- name+=" $labels"
- else
- return 2
- fi
- elif [ -n "$pod_uid" ]; then
- if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
- labels="${labels%%,container_*}"
- labels+=',kind="pod"'
- labels+=",qos_class=\"$qos_class\""
- [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
- [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
- name="pod"
- name+="_$(get_lbl_val "$labels" namespace)"
- name+="_$(get_lbl_val "$labels" pod_name)"
- labels=$(remove_lbl "$labels" "pod_uid")
- labels=$(add_lbl_prefix "$labels" "k8s_")
- name+=" $labels"
- else
- return 2
- fi
- fi
-
- # jq filter nonexistent field and nonexistent label value is 'null'
- if [[ $name =~ _null(_|$) ]]; then
- warning "${fn}: invalid name: $name (cgroup '$id')"
- if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
- # local data is cached and may not contain the correct id
- return 2
- fi
- return 1
- fi
-
- echo "$name"
- [ -n "$name" ]
- return
-}
-
-# Wrapper around k8s_get_kubepod_name: translates its return code into the
-# NAME/LABELS/EXIT_CODE variables consumed by the main dispatcher
-# (success / retry / disable).
-function k8s_get_name() {
- local fn="${FUNCNAME[0]}"
- local cgroup_path="${1}"
- local id="${2}"
- local kubepod_name=""
-
- kubepod_name=$(k8s_get_kubepod_name "$cgroup_path" "$id")
-
- case "$?" in
- 0)
- kubepod_name="k8s_${kubepod_name}"
-
- # output is "<name> <labels>" — identical halves mean no labels
- local name labels
- name=${kubepod_name%% *}
- labels=${kubepod_name#* }
-
- if [ "$name" != "$labels" ]; then
- # NOTE(review): the message below is missing a closing ' after
- # ${labels} — cosmetic only, cannot be fixed in a comments-only pass.
- info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}"
- NAME="$name"
- LABELS="$labels"
- else
- info "${fn}: cgroup '${id}' has chart name '${NAME}'"
- NAME="$name"
- fi
- EXIT_CODE=$EXIT_SUCCESS
- ;;
- 1)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and enabling it."
- EXIT_CODE=$EXIT_SUCCESS
- ;;
- 2)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and asking for retry."
- EXIT_CODE=$EXIT_RETRY
- ;;
- *)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and disabling it."
- EXIT_CODE=$EXIT_DISABLE
- ;;
- esac
-}
-
-function docker_get_name() {
- local id="${1}"
- # See https://github.com/netdata/netdata/pull/13523 for details
- if command -v snap >/dev/null 2>&1 && snap list docker >/dev/null 2>&1; then
- docker_like_get_name_api DOCKER_HOST "${id}"
- elif hash docker 2> /dev/null; then
- docker_like_get_name_command docker "${id}"
- else
- docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
- fi
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of docker container '${id}'"
- EXIT_CODE=$EXIT_RETRY
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-function docker_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
- docker_get_name "${id}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
-}
-
-function podman_get_name() {
- local id="${1}"
-
- # for Podman, prefer using the API if we can, as netdata will not normally have access
- # to other users' containers, so they will not be visible when running `podman ps`
- docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
-
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of podman container '${id}'"
- EXIT_CODE=$EXIT_RETRY
- NAME="${id:0:12}"
- else
- info "podman container '${id}' is named '${NAME}'"
- fi
-}
-
-function podman_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
- podman_get_name "${id}"
- else
- error "a podman id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
-}
-
-# -----------------------------------------------------------------------------
-
-DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
-PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
-CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409')
-CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409')
-EXIT_SUCCESS=0
-EXIT_RETRY=2
-EXIT_DISABLE=3
-EXIT_CODE=$EXIT_SUCCESS
-NAME=
-LABELS=
-
-# -----------------------------------------------------------------------------
-
-if [ -z "${CGROUP}" ]; then
- fatal "called without a cgroup name. Nothing to do."
-fi
-
-if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
- k8s_get_name "${CGROUP_PATH}" "${CGROUP}"
- fi
-fi
-
-if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # ECS
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ system.slice_containerd.service_cpuset_[a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers under containerd
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ystem.slice_containerd.service_cpuset_\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
- # Podman
- PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
- podman_validate_id "${PODMANID}"
-
- elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
- # systemd-nspawn
- NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ ${CGROUP} =~ machine.slice_machine.*-lxc ]]; then
- # libvirtd / lxc containers
- # machine.slice machine-lxc/x2d969/x2dhubud0xians01.scope => lxc/hubud0xians01
- # machine.slice_machine-lxc/x2d969/x2dhubud0xians01.scope/libvirt_init.scope => lxc/hubud0xians01/libvirt_init
- NAME="lxc/$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-lxc//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
- elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
- # libvirtd / qemu virtual machines
- # machine.slice_machine-qemu_x2d1_x2dopnsense.scope => qemu_opnsense
- NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
-
- elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
- # Proxmox VMs
-
- FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]; then
- NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
- # Proxmox Containers (LXC)
-
- FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
- NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
- error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
- # LXC 4.0
- NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
-fi
-
-NAME="${NAME// /_}"
-
-info "cgroup '${CGROUP}' is called '${NAME}', labels '${LABELS}'"
-if [ -n "$LABELS" ]; then
- echo "${NAME} ${LABELS}"
-else
- echo "${NAME}"
-fi
-
-exit ${EXIT_CODE}
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
new file mode 100755
index 000000000..0f8b63256
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -0,0 +1,706 @@
+#!/usr/bin/env bash
+#shellcheck disable=SC2001
+
+# netdata
+# real-time performance and health monitoring, done right!
+# (C) 2023 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Script to find a better name for cgroups
+#
+
+export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@"
+export LC_ALL=C
+
+cmd_line="'${0}' $(printf "'%s' " "${@}")"
+
+# -----------------------------------------------------------------------------
+# logging
+
+PROGRAM_NAME="$(basename "${0}")"
+
+# these should be the same with syslog() priorities
+NDLP_EMERG=0 # system is unusable
+NDLP_ALERT=1 # action must be taken immediately
+NDLP_CRIT=2 # critical conditions
+NDLP_ERR=3 # error conditions
+NDLP_WARN=4 # warning conditions
+NDLP_NOTICE=5 # normal but significant condition
+NDLP_INFO=6 # informational
+NDLP_DEBUG=7 # debug-level messages
+
+# the max (numerically) log level we will log
+LOG_LEVEL=$NDLP_INFO
+
+set_log_min_priority() {
+ case "${NETDATA_LOG_LEVEL,,}" in
+ "emerg" | "emergency")
+ LOG_LEVEL=$NDLP_EMERG
+ ;;
+
+ "alert")
+ LOG_LEVEL=$NDLP_ALERT
+ ;;
+
+ "crit" | "critical")
+ LOG_LEVEL=$NDLP_CRIT
+ ;;
+
+ "err" | "error")
+ LOG_LEVEL=$NDLP_ERR
+ ;;
+
+ "warn" | "warning")
+ LOG_LEVEL=$NDLP_WARN
+ ;;
+
+ "notice")
+ LOG_LEVEL=$NDLP_NOTICE
+ ;;
+
+ "info")
+ LOG_LEVEL=$NDLP_INFO
+ ;;
+
+ "debug")
+ LOG_LEVEL=$NDLP_DEBUG
+ ;;
+ esac
+}
+
+set_log_min_priority
+
+log() {
+ local level="${1}"
+ shift 1
+
+ [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
+
+ systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
+INVOCATION_ID=${NETDATA_INVOCATION_ID}
+SYSLOG_IDENTIFIER=${PROGRAM_NAME}
+PRIORITY=${level}
+THREAD_TAG=cgroup-name
+ND_LOG_SOURCE=collector
+ND_REQUEST=${cmd_line}
+MESSAGE=${*//\\n/--NEWLINE--}
+
+EOFLOG
+ # AN EMPTY LINE IS NEEDED ABOVE
+}
+
+info() {
+ log "$NDLP_INFO" "${@}"
+}
+
+warning() {
+ log "$NDLP_WARN" "${@}"
+}
+
+error() {
+ log "$NDLP_ERR" "${@}"
+}
+
+fatal() {
+ log "$NDLP_ALERT" "${@}"
+ exit 1
+}
+
+debug() {
+ log "$NDLP_DEBUG" "${@}"
+}
+
+# -----------------------------------------------------------------------------
+
+function parse_docker_like_inspect_output() {
+ local output="${1}"
+ eval "$(grep -E "^(NOMAD_NAMESPACE|NOMAD_JOB_NAME|NOMAD_TASK_NAME|NOMAD_SHORT_ALLOC_ID|CONT_NAME|IMAGE_NAME)=" <<<"$output")"
+ if [ -n "$NOMAD_NAMESPACE" ] && [ -n "$NOMAD_JOB_NAME" ] && [ -n "$NOMAD_TASK_NAME" ] && [ -n "$NOMAD_SHORT_ALLOC_ID" ]; then
+ NAME="${NOMAD_NAMESPACE}-${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}-${NOMAD_SHORT_ALLOC_ID}"
+ else
+ NAME=$(echo "${CONT_NAME}" | sed 's|^/||')
+ fi
+ if [ -n "${IMAGE_NAME}" ]; then
+ LABELS="image=\"${IMAGE_NAME}\""
+ fi
+}
+
+function docker_like_get_name_command() {
+ local command="${1}"
+ local id="${2}"
+ info "Running command: ${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}' \"${id}\""
+ if OUTPUT="$(${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}{{println}}IMAGE_NAME={{ .Config.Image}}' "${id}")" &&
+ [ -n "$OUTPUT" ]; then
+ parse_docker_like_inspect_output "$OUTPUT"
+ fi
+ return 0
+}
+
+function docker_like_get_name_api() {
+ local host_var="${1}"
+ local host="${!host_var}"
+ local path="/containers/${2}/json"
+ if [ -z "${host}" ]; then
+ warning "No ${host_var} is set"
+ return 1
+ fi
+ if ! command -v jq >/dev/null 2>&1; then
+ warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
+ return 1
+ fi
+ if [ -S "${host}" ]; then
+ info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
+ JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
+ else
+ info "Running API command: curl \"${host}${path}\""
+ JSON=$(curl -sS "${host}${path}")
+ fi
+ if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
+ parse_docker_like_inspect_output "$OUTPUT"
+ fi
+ return 0
+}
+
+# get_lbl_val returns the value for the label with the given name.
+# Returns "null" string if the label doesn't exist.
+# Expected labels format: 'name="value",...'.
+function get_lbl_val() {
+ local labels want_name
+ labels="${1}"
+ want_name="${2}"
+
+ IFS=, read -ra labels <<< "$labels"
+
+ local lname lval
+ for l in "${labels[@]}"; do
+ IFS="=" read -r lname lval <<< "$l"
+ if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
+ echo "${lval:1:-1}" # trim "
+ return 0
+ fi
+ done
+
+ echo "null"
+ return 1
+}
+
+function add_lbl_prefix() {
+ local orig_labels prefix
+ orig_labels="${1}"
+ prefix="${2}"
+
+ IFS=, read -ra labels <<< "$orig_labels"
+
+ local new_labels
+ for l in "${labels[@]}"; do
+ new_labels+="${prefix}${l},"
+ done
+
+ echo "${new_labels:0:-1}" # trim last ','
+}
+
+function remove_lbl() {
+ local orig_labels lbl_name
+ orig_labels="${1}"
+ lbl_name="${2}"
+
+ IFS=, read -ra labels <<< "$orig_labels"
+
+ local new_labels
+ for l in "${labels[@]}"; do
+ IFS="=" read -r lname lval <<< "$l"
+ [ "$lbl_name" != "$lname" ] && new_labels+="${l},"
+ done
+
+ echo "${new_labels:0:-1}" # trim last ','
+}
+
+function k8s_is_pause_container() {
+ local cgroup_path="${1}"
+
+ local file
+ if [ -d "${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct" ]; then
+ file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct/$cgroup_path/cgroup.procs"
+ else
+ file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/$cgroup_path/cgroup.procs"
+ fi
+
+ [ ! -f "$file" ] && return 1
+
+ local procs
+ IFS= read -rd' ' procs 2>/dev/null <"$file"
+ #shellcheck disable=SC2206
+ procs=($procs)
+
+ [ "${#procs[@]}" -ne 1 ] && return 1
+
+ IFS= read -r comm 2>/dev/null <"/proc/${procs[0]}/comm"
+
+ [ "$comm" == "pause" ]
+ return
+}
+
+function k8s_gcp_get_cluster_name() {
+ local header url id loc name
+ header="Metadata-Flavor: Google"
+ url="http://metadata/computeMetadata/v1"
+ if id=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/project/project-id") &&
+ loc=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-location") &&
+ name=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-name") &&
+ [ -n "$id" ] && [ -n "$loc" ] && [ -n "$name" ]; then
+ echo "gke_${id}_${loc}_${name}"
+ return 0
+ fi
+ return 1
+}
+
+# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
+# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
+# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
+function k8s_get_kubepod_name() {
+ # GKE /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
+ # |-- kubepods
+ # | |-- burstable
+ # | | |-- pod98cee708-023b-11eb-933d-42010a800193
+ # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
+ # | `-- pode314bbac-d577-11ea-a171-42010a80013b
+ # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
+ #
+ # GKE /sys/fs/cgroup/*/ (cri=containerd, cgroups=v1):
+ # |-- kubepods.slice
+ # | |-- kubepods-besteffort.slice
+ # | | |-- kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad.slice
+ # | | | |-- cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be.scope
+ # | `-- kubepods-pod91f5b561_369f_4103_8015_66391059996a.slice
+ # | |-- cri-containerd-24c53b774a586f06abc058619b47f71d9d869ac50c92898adbd199106fd0aaeb.scope
+ #
+ # GKE /sys/fs/cgroup/*/ (cri=crio, cgroups=v1):
+ # |-- kubepods.slice
+ # | |-- kubepods-besteffort.slice
+ # | | |-- kubepods-besteffort-podad412dfe_3589_4056_965a_592356172968.slice
+ # | | | |-- crio-77b019312fd9825828b70214b2c94da69c30621af2a7ee06f8beace4bc9439e5.scope
+ #
+ # Minikube (v1.8.2) /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
+ # |-- kubepods.slice
+ # | |-- kubepods-besteffort.slice
+ # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
+ # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
+ #
+ # kind v0.14.0
+ # |-- kubelet.slice
+ # | |-- kubelet-kubepods.slice
+ # | | |-- kubelet-kubepods-besteffort.slice
+ # | | | |-- kubelet-kubepods-besteffort-pod7881ed9e_c63e_4425_b5e0_ac55a08ae939.slice
+ # | | | | |-- cri-containerd-00c7939458bffc416bb03451526e9fde13301d6654cfeadf5b4964a7fb5be1a9.scope
+ #
+ # NOTE: cgroups plugin
+ # - uses '_' to join dir names (so it is <parent>_<child>_<child>_...)
+ # - replaces '.' with '-'
+
+ local fn="${FUNCNAME[0]}"
+ local cgroup_path="${1}"
+ local id="${2}"
+
+ if [[ ! $id =~ ^.*kubepods.* ]]; then
+ warning "${fn}: '${id}' is not kubepod cgroup."
+ return 1
+ fi
+
+ local clean_id="$id"
+ clean_id=${clean_id//.slice/}
+ clean_id=${clean_id//.scope/}
+
+ local name pod_uid cntr_id
+ if [[ $clean_id == "kubepods" ]]; then
+ name="$clean_id"
+ elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
+ # kubepods_<QOS_CLASS>
+ # kubepods_kubepods-<QOS_CLASS>
+ name=${clean_id//-/_}
+ name=${name/#kubepods_kubepods/kubepods}
+ elif [[ $clean_id =~ .+pod[a-f0-9_-]+_(docker|crio|cri-containerd)-([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_(docker|crio|cri-containerd)-<CONTAINER_ID> (POD_UID w/ "_")
+ cntr_id=${BASH_REMATCH[2]}
+ elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
+ # ...pod<POD_UID>_<CONTAINER_ID>
+ cntr_id=${BASH_REMATCH[1]}
+ elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
+ # ...pod<POD_UID> (POD_UID w/ and w/o "_")
+ pod_uid=${BASH_REMATCH[1]}
+ pod_uid=${pod_uid//_/-}
+ fi
+
+ if [ -n "$name" ]; then
+ echo "$name"
+ return 0
+ fi
+
+ if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
+ warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
+ return 3
+ fi
+
+ [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
+ [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
+
+ if [ -n "$cntr_id" ] && k8s_is_pause_container "$cgroup_path"; then
+ return 3
+ fi
+
+ if ! command -v jq > /dev/null 2>&1; then
+ warning "${fn}: 'jq' command not available."
+ return 1
+ fi
+
+ local tmp_kube_cluster_name="${TMPDIR:-"/tmp"}/netdata-cgroups-k8s-cluster-name"
+ local tmp_kube_system_ns_uid_file="${TMPDIR:-"/tmp"}/netdata-cgroups-kubesystem-uid"
+ local tmp_kube_containers_file="${TMPDIR:-"/tmp"}/netdata-cgroups-containers"
+
+ local kube_cluster_name
+ local kube_system_uid
+ local labels
+
+ if [ -n "$cntr_id" ] &&
+ [ -f "$tmp_kube_cluster_name" ] &&
+ [ -f "$tmp_kube_system_ns_uid_file" ] &&
+ [ -f "$tmp_kube_containers_file" ] &&
+ labels=$(grep "$cntr_id" "$tmp_kube_containers_file" 2>/dev/null); then
+ IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
+ IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
+ else
+ IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
+ IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
+ [ -z "$kube_cluster_name" ] && ! kube_cluster_name=$(k8s_gcp_get_cluster_name) && kube_cluster_name="unknown"
+
+ local kube_system_ns
+ local pods
+
+ if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
+ local token header host url
+ token="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
+ header="Authorization: Bearer $token"
+ host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
+
+ if [ -z "$kube_system_uid" ]; then
+ url="https://$host/api/v1/namespaces/kube-system"
+ # FIX: check HTTP response code
+ if ! kube_system_ns=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${kube_system_ns}."
+ fi
+ fi
+
+ local url
+ if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
+ url="${KUBELET_URL:-https://localhost:10250}/pods"
+ else
+ url="https://$host/api/v1/pods"
+ [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
+ fi
+
+ # FIX: check HTTP response code
+ if ! pods=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
+ warning "${fn}: error on curl '${url}': ${pods}."
+ return 1
+ fi
+ elif ps -C kubelet >/dev/null 2>&1 && command -v kubectl >/dev/null 2>&1; then
+ if [ -z "$kube_system_uid" ]; then
+ if ! kube_system_ns=$(kubectl --kubeconfig="$KUBE_CONFIG" get namespaces kube-system -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${kube_system_ns}."
+ fi
+ fi
+
+ [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
+ if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
+ warning "${fn}: error on 'kubectl': ${pods}."
+ return 1
+ fi
+ else
+ warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
+ return 1
+ fi
+
+ if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<<"$kube_system_ns" 2>&1); then
+ warning "${fn}: error on 'jq' parse kube_system_ns: ${kube_system_uid}."
+ fi
+
+ local jq_filter
+ jq_filter+='.items[] | "'
+ jq_filter+='namespace=\"\(.metadata.namespace)\",'
+ jq_filter+='pod_name=\"\(.metadata.name)\",'
+ jq_filter+='pod_uid=\"\(.metadata.uid)\",'
+ #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
+ jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
+ jq_filter+='node_name=\"\(.spec.nodeName)\",'
+ jq_filter+='" + '
+ jq_filter+='(.status.containerStatuses[]? | "'
+ jq_filter+='container_name=\"\(.name)\",'
+ jq_filter+='container_id=\"\(.containerID)\"'
+ jq_filter+='") | '
+ jq_filter+='sub("(docker|cri-o|containerd)://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
+
+ local containers
+ if ! containers=$(jq -r "${jq_filter}" <<<"$pods" 2>&1); then
+ warning "${fn}: error on 'jq' parse pods: ${containers}."
+ return 1
+ fi
+
+ [ -n "$kube_cluster_name" ] && echo "$kube_cluster_name" >"$tmp_kube_cluster_name" 2>/dev/null
+ [ -n "$kube_system_ns" ] && [ -n "$kube_system_uid" ] && echo "$kube_system_uid" >"$tmp_kube_system_ns_uid_file" 2>/dev/null
+ echo "$containers" >"$tmp_kube_containers_file" 2>/dev/null
+ fi
+
+ local qos_class
+ if [[ $clean_id =~ .+(besteffort|burstable) ]]; then
+ qos_class="${BASH_REMATCH[1]}"
+ else
+ qos_class="guaranteed"
+ fi
+
+ # available labels:
+ # namespace, pod_name, pod_uid, container_name, container_id, node_name
+ if [ -n "$cntr_id" ]; then
+ if [ -n "$labels" ] || labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
+ labels+=',kind="container"'
+ labels+=",qos_class=\"$qos_class\""
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
+ name="cntr"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ name+="_$(get_lbl_val "$labels" container_name)"
+ labels=$(remove_lbl "$labels" "container_id")
+ labels=$(remove_lbl "$labels" "pod_uid")
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ else
+ return 2
+ fi
+ elif [ -n "$pod_uid" ]; then
+ if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
+ labels="${labels%%,container_*}"
+ labels+=',kind="pod"'
+ labels+=",qos_class=\"$qos_class\""
+ [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
+ [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
+ name="pod"
+ name+="_$(get_lbl_val "$labels" namespace)"
+ name+="_$(get_lbl_val "$labels" pod_name)"
+ labels=$(remove_lbl "$labels" "pod_uid")
+ labels=$(add_lbl_prefix "$labels" "k8s_")
+ name+=" $labels"
+ else
+ return 2
+ fi
+ fi
+
+ # jq filter nonexistent field and nonexistent label value is 'null'
+ if [[ $name =~ _null(_|$) ]]; then
+ warning "${fn}: invalid name: $name (cgroup '$id')"
+ if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
+ # local data is cached and may not contain the correct id
+ return 2
+ fi
+ return 1
+ fi
+
+ echo "$name"
+ [ -n "$name" ]
+ return
+}
+
+function k8s_get_name() {
+ local fn="${FUNCNAME[0]}"
+ local cgroup_path="${1}"
+ local id="${2}"
+ local kubepod_name=""
+
+ kubepod_name=$(k8s_get_kubepod_name "$cgroup_path" "$id")
+
+ case "$?" in
+ 0)
+ kubepod_name="k8s_${kubepod_name}"
+
+ local name labels
+ name=${kubepod_name%% *}
+ labels=${kubepod_name#* }
+
+ if [ "$name" != "$labels" ]; then
+        info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}'"
+ NAME="$name"
+ LABELS="$labels"
+ else
+        info "${fn}: cgroup '${id}' has chart name '${name}'"
+ NAME="$name"
+ fi
+ EXIT_CODE=$EXIT_SUCCESS
+ ;;
+ 1)
+ NAME="k8s_${id}"
+ warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and enabling it."
+ EXIT_CODE=$EXIT_SUCCESS
+ ;;
+ 2)
+ NAME="k8s_${id}"
+ warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and asking for retry."
+ EXIT_CODE=$EXIT_RETRY
+ ;;
+ *)
+ NAME="k8s_${id}"
+ warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and disabling it."
+ EXIT_CODE=$EXIT_DISABLE
+ ;;
+ esac
+}
+
+function docker_get_name() {
+ local id="${1}"
+ # See https://github.com/netdata/netdata/pull/13523 for details
+ if command -v snap >/dev/null 2>&1 && snap list docker >/dev/null 2>&1; then
+ docker_like_get_name_api DOCKER_HOST "${id}"
+ elif hash docker 2> /dev/null; then
+ docker_like_get_name_command docker "${id}"
+ else
+ docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
+ fi
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of docker container '${id}'"
+ EXIT_CODE=$EXIT_RETRY
+ NAME="${id:0:12}"
+ else
+ info "docker container '${id}' is named '${NAME}'"
+ fi
+}
+
+function docker_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
+ docker_get_name "${id}"
+ else
+ error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+function podman_get_name() {
+ local id="${1}"
+
+ # for Podman, prefer using the API if we can, as netdata will not normally have access
+ # to other users' containers, so they will not be visible when running `podman ps`
+ docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
+
+ if [ -z "${NAME}" ]; then
+ warning "cannot find the name of podman container '${id}'"
+ EXIT_CODE=$EXIT_RETRY
+ NAME="${id:0:12}"
+ else
+ info "podman container '${id}' is named '${NAME}'"
+ fi
+}
+
+function podman_validate_id() {
+ local id="${1}"
+ if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
+ podman_get_name "${id}"
+ else
+ error "a podman id cannot be extracted from docker cgroup '${CGROUP}'."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
+PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
+CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409')
+CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409')
+EXIT_SUCCESS=0
+EXIT_RETRY=2
+EXIT_DISABLE=3
+EXIT_CODE=$EXIT_SUCCESS
+NAME=
+LABELS=
+
+# -----------------------------------------------------------------------------
+
+if [ -z "${CGROUP}" ]; then
+ fatal "called without a cgroup name. Nothing to do."
+fi
+
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
+ k8s_get_name "${CGROUP_PATH}" "${CGROUP}"
+ fi
+fi
+
+if [ -z "${NAME}" ]; then
+ if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # ECS
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ system.slice_containerd.service_cpuset_[a-fA-F0-9]+[-_\.]?.*$ ]]; then
+ # docker containers under containerd
+ #shellcheck disable=SC1117
+ DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ystem.slice_containerd.service_cpuset_\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
+ docker_validate_id "${DOCKERID}"
+ elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
+ # Podman
+ PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
+ podman_validate_id "${PODMANID}"
+
+ elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
+ # systemd-nspawn
+ NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
+
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-lxc ]]; then
+ # libvirtd / lxc containers
+ # machine.slice machine-lxc/x2d969/x2dhubud0xians01.scope => lxc/hubud0xians01
+ # machine.slice_machine-lxc/x2d969/x2dhubud0xians01.scope/libvirt_init.scope => lxc/hubud0xians01/libvirt_init
+ NAME="lxc/$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-lxc//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
+ elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ # machine.slice_machine-qemu_x2d1_x2dopnsense.scope => qemu_opnsense
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
+
+ elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
+ # libvirtd / qemu virtual machines
+ NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
+
+ elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d "${NETDATA_HOST_PREFIX}/etc/pve" ]]; then
+ # Proxmox VMs
+ FILENAME="${NETDATA_HOST_PREFIX}/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
+ if [[ -f $FILENAME && -r $FILENAME ]]; then
+ NAME="qemu_$(grep -e '^name: ' "${FILENAME}" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d "${NETDATA_HOST_PREFIX}/etc/pve" ]]; then
+ # Proxmox Containers (LXC)
+ FILENAME="${NETDATA_HOST_PREFIX}/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
+ if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
+ NAME=$(grep -e '^hostname: ' "${FILENAME}" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
+ else
+ error "proxmox config file missing ${FILENAME} or netdata does not have read access. Please ensure netdata is a member of www-data group."
+ fi
+ elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
+ # LXC 4.0
+ NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
+ fi
+
+ [ -z "${NAME}" ] && NAME="${CGROUP}"
+ [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
+fi
+
+NAME="${NAME// /_}"
+
+info "cgroup '${CGROUP}' is called '${NAME}', labels '${LABELS}'"
+if [ -n "$LABELS" ]; then
+ echo "${NAME} ${LABELS}"
+else
+ echo "${NAME}"
+fi
+
+exit ${EXIT_CODE}
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
deleted file mode 100755
index 008bc987f..000000000
--- a/collectors/cgroups.plugin/cgroup-network-helper.sh
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1117
-
-# cgroup-network-helper.sh
-# detect container and virtual machine interfaces
-#
-# (C) 2017 Costa Tsaousis
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
-# It tries to find all the network interfaces that belong to the same cgroup.
-#
-# It supports several method for this detection:
-#
-# 1. cgroup-network (the binary father of this script) detects veth network interfaces,
-# by examining iflink and ifindex IDs and switching namespaces
-# (it also detects the interface name as it is used by the container).
-#
-# 2. this script, uses /proc/PID/fdinfo to find tun/tap network interfaces.
-#
-# 3. this script, calls virsh to find libvirt network interfaces.
-#
-
-# -----------------------------------------------------------------------------
-
-# the system path is cleared by cgroup-network
-# shellcheck source=/dev/null
-[ -f /etc/profile ] && source /etc/profile
-
-export LC_ALL=C
-
-PROGRAM_NAME="$(basename "${0}")"
-
-LOG_LEVEL_ERR=1
-LOG_LEVEL_WARN=2
-LOG_LEVEL_INFO=3
-LOG_LEVEL="$LOG_LEVEL_INFO"
-
-set_log_severity_level() {
- case ${NETDATA_LOG_SEVERITY_LEVEL,,} in
- "info") LOG_LEVEL="$LOG_LEVEL_INFO";;
- "warn" | "warning") LOG_LEVEL="$LOG_LEVEL_WARN";;
- "err" | "error") LOG_LEVEL="$LOG_LEVEL_ERR";;
- esac
-}
-
-set_log_severity_level
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-log() {
- local status="${1}"
- shift
-
- echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
-
-}
-
-info() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_INFO" -gt "$LOG_LEVEL" ]] && return
- log INFO "${@}"
-}
-
-warning() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_WARN" -gt "$LOG_LEVEL" ]] && return
- log WARNING "${@}"
-}
-
-error() {
- [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_ERR" -gt "$LOG_LEVEL" ]] && return
- log ERROR "${@}"
-}
-
-fatal() {
- log FATAL "${@}"
- exit 1
-}
-
-debug=${NETDATA_CGROUP_NETWORK_HELPER_DEBUG=0}
-debug() {
- [ "${debug}" = "1" ] && log DEBUG "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# check for BASH v4+ (required for associative arrays)
-
-[ $(( BASH_VERSINFO[0] )) -lt 4 ] && \
- fatal "BASH version 4 or later is required (this is ${BASH_VERSION})."
-
-# -----------------------------------------------------------------------------
-# parse the arguments
-
-pid=
-cgroup=
-while [ -n "${1}" ]
-do
- case "${1}" in
- --cgroup) cgroup="${2}"; shift 1;;
- --pid|-p) pid="${2}"; shift 1;;
- --debug|debug) debug=1;;
- *) fatal "Cannot understand argument '${1}'";;
- esac
-
- shift
-done
-
-if [ -z "${pid}" ] && [ -z "${cgroup}" ]
-then
- fatal "Either --pid or --cgroup is required"
-fi
-
-# -----------------------------------------------------------------------------
-
-set_source() {
- [ ${debug} -eq 1 ] && echo "SRC ${*}"
-}
-
-
-# -----------------------------------------------------------------------------
-# veth interfaces via cgroup
-
-# cgroup-network can detect veth interfaces by itself (written in C).
-# If you seek for a shell version of what it does, check this:
-# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
-
-
-# -----------------------------------------------------------------------------
-# tun/tap interfaces via /proc/PID/fdinfo
-
-# find any tun/tap devices linked to a pid
-proc_pid_fdinfo_iff() {
- local p="${1}" # the pid
-
- debug "Searching for tun/tap interfaces for pid ${p}..."
- set_source "fdinfo"
- grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
-}
-
-find_tun_tap_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
- [ -d "${c}/emulator" ] && c="${c}/emulator" # check for 'emulator' subdirectory
- c="${c}/cgroup.procs" # make full path
-
- # for each pid of the cgroup
- # find any tun/tap devices linked to the pid
- if [ -f "${c}" ]
- then
- local p
- for p in $(< "${c}" )
- do
- proc_pid_fdinfo_iff "${p}"
- done
- else
- debug "Cannot find file '${c}', not searching for tun/tap interfaces."
- fi
-}
-
-
-# -----------------------------------------------------------------------------
-# virsh domain network interfaces
-
-virsh_cgroup_to_domain_name() {
- local c="${1}" # the cgroup path
-
- debug "extracting a possible virsh domain from cgroup ${c}..."
-
- # extract for the cgroup path
- sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
- -e "s|.*/machine/qemu-[0-9]\+-\(.*\)\.libvirt-qemu$|\1|p" \
- -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
- <<EOF
-${c}
-EOF
-}
-
-virsh_find_all_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- # the virsh command
- local virsh
- # shellcheck disable=SC2230
- virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
-
- if [ -n "${virsh}" ]
- then
- local d
- d="$(virsh_cgroup_to_domain_name "${c}")"
- # convert hex to character
- # e.g.: vm01\x2dweb => vm01-web (https://github.com/netdata/netdata/issues/11088#issuecomment-832618149)
- d="$(printf '%b' "${d}")"
-
- if [ -n "${d}" ]
- then
- debug "running: virsh domiflist ${d}; to find the network interfaces"
-
- # 'virsh -r domiflist <domain>' example output
- # Interface Type Source Model MAC
- #--------------------------------------------------------------
- # vnet3 bridge br0 virtio 52:54:00:xx:xx:xx
- # vnet4 network default virtio 52:54:00:yy:yy:yy
-
- # match only 'network' interfaces from virsh output
- set_source "virsh"
- "${virsh}" -r domiflist "${d}" |\
- sed -n \
- -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
- -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
- else
- debug "no virsh domain extracted from cgroup ${c}"
- fi
- else
- debug "virsh command is not available"
- fi
-}
-
-# -----------------------------------------------------------------------------
-# netnsid detected interfaces
-
-netnsid_find_all_interfaces_for_pid() {
- local pid="${1}"
- [ -z "${pid}" ] && return 1
-
- local nsid
- nsid=$(lsns -t net -p "${pid}" -o NETNSID -nr 2>/dev/null)
- if [ -z "${nsid}" ] || [ "${nsid}" = "unassigned" ]; then
- return 1
- fi
-
- set_source "netnsid"
- ip link show |\
- grep -B 1 -E " link-netnsid ${nsid}($| )" |\
- sed -n -e "s|^[[:space:]]*[0-9]\+:[[:space:]]\+\([A-Za-z0-9_]\+\)\(@[A-Za-z0-9_]\+\)*:[[:space:]].*$|\1|p"
-}
-
-netnsid_find_all_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- if [ -f "${c}/cgroup.procs" ]; then
- netnsid_find_all_interfaces_for_pid "$(head -n 1 "${c}/cgroup.procs" 2>/dev/null)"
- else
- debug "Cannot find file '${c}/cgroup.procs', not searching for netnsid interfaces."
- fi
-}
-
-# -----------------------------------------------------------------------------
-
-find_all_interfaces_of_pid_or_cgroup() {
- local p="${1}" c="${2}" # the pid and the cgroup path
-
- if [ -n "${pid}" ]
- then
- # we have been called with a pid
-
- proc_pid_fdinfo_iff "${p}"
- netnsid_find_all_interfaces_for_pid "${p}"
-
- elif [ -n "${c}" ]
- then
- # we have been called with a cgroup
-
- info "searching for network interfaces of cgroup '${c}'"
-
- find_tun_tap_interfaces_for_cgroup "${c}"
- virsh_find_all_interfaces_for_cgroup "${c}"
- netnsid_find_all_interfaces_for_cgroup "${c}"
-
- else
-
- error "Either a pid or a cgroup path is needed"
- return 1
-
- fi
-
- return 0
-}
-
-# -----------------------------------------------------------------------------
-
-# an associative array to store the interfaces
-# the index is the interface name as seen by the host
-# the value is the interface name as seen by the guest / container
-declare -A devs=()
-
-# store all interfaces found in the associative array
-# this will also give the unique devices, as seen by the host
-last_src=
-# shellcheck disable=SC2162
-while read host_device guest_device
-do
- [ -z "${host_device}" ] && continue
-
- [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
-
- # the default guest_device is the host_device
- [ -z "${guest_device}" ] && guest_device="${host_device}"
-
- # when we run in debug, show the source
- debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
-
- if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
- devs[${host_device}]="${guest_device}"
- fi
-
-done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
-
-# print the interfaces found, in the format netdata expects them
-found=0
-for x in "${!devs[@]}"
-do
- found=$((found + 1))
- echo "${x} ${devs[${x}]}"
-done
-
-debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
-
-# let netdata know if we found any
-[ ${found} -eq 0 ] && exit 1
-exit 0
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh.in b/collectors/cgroups.plugin/cgroup-network-helper.sh.in
new file mode 100755
index 000000000..da9b9162a
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh.in
@@ -0,0 +1,376 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1117
+
+# cgroup-network-helper.sh
+# detect container and virtual machine interfaces
+#
+# (C) 2023 Netdata Inc.
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
+# It tries to find all the network interfaces that belong to the same cgroup.
+#
+# It supports several method for this detection:
+#
+# 1. cgroup-network (the binary father of this script) detects veth network interfaces,
+# by examining iflink and ifindex IDs and switching namespaces
+# (it also detects the interface name as it is used by the container).
+#
+# 2. this script, uses /proc/PID/fdinfo to find tun/tap network interfaces.
+#
+# 3. this script, calls virsh to find libvirt network interfaces.
+#
+
+# -----------------------------------------------------------------------------
+
+# the system path is cleared by cgroup-network
+# shellcheck source=/dev/null
+[ -f /etc/profile ] && source /etc/profile
+export PATH="${PATH}:@sbindir_POST@"
+
+export LC_ALL=C
+
+cmd_line="'${0}' $(printf "'%s' " "${@}")"
+
+# -----------------------------------------------------------------------------
+# logging
+
+PROGRAM_NAME="$(basename "${0}")"
+
+# these should be the same with syslog() priorities
+NDLP_EMERG=0 # system is unusable
+NDLP_ALERT=1 # action must be taken immediately
+NDLP_CRIT=2 # critical conditions
+NDLP_ERR=3 # error conditions
+NDLP_WARN=4 # warning conditions
+NDLP_NOTICE=5 # normal but significant condition
+NDLP_INFO=6 # informational
+NDLP_DEBUG=7 # debug-level messages
+
+# the max (numerically) log level we will log
+LOG_LEVEL=$NDLP_INFO
+
+set_log_min_priority() {
+ case "${NETDATA_LOG_LEVEL,,}" in
+ "emerg" | "emergency")
+ LOG_LEVEL=$NDLP_EMERG
+ ;;
+
+ "alert")
+ LOG_LEVEL=$NDLP_ALERT
+ ;;
+
+ "crit" | "critical")
+ LOG_LEVEL=$NDLP_CRIT
+ ;;
+
+ "err" | "error")
+ LOG_LEVEL=$NDLP_ERR
+ ;;
+
+ "warn" | "warning")
+ LOG_LEVEL=$NDLP_WARN
+ ;;
+
+ "notice")
+ LOG_LEVEL=$NDLP_NOTICE
+ ;;
+
+ "info")
+ LOG_LEVEL=$NDLP_INFO
+ ;;
+
+ "debug")
+ LOG_LEVEL=$NDLP_DEBUG
+ ;;
+ esac
+}
+
+set_log_min_priority
+
+log() {
+ local level="${1}"
+ shift 1
+
+ [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
+
+ systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
+INVOCATION_ID=${NETDATA_INVOCATION_ID}
+SYSLOG_IDENTIFIER=${PROGRAM_NAME}
+PRIORITY=${level}
+THREAD_TAG=cgroup-network-helper
+ND_LOG_SOURCE=collector
+ND_REQUEST=${cmd_line}
+MESSAGE=${*//\\n/--NEWLINE--}
+
+EOFLOG
+ # AN EMPTY LINE IS NEEDED ABOVE
+}
+
+info() {
+ log "$NDLP_INFO" "${@}"
+}
+
+warning() {
+ log "$NDLP_WARN" "${@}"
+}
+
+error() {
+ log "$NDLP_ERR" "${@}"
+}
+
+fatal() {
+ log "$NDLP_ALERT" "${@}"
+ exit 1
+}
+
+debug() {
+ log "$NDLP_DEBUG" "${@}"
+}
+
+debug=0
+if [ "${NETDATA_CGROUP_NETWORK_HELPER_DEBUG-0}" = "1" ]; then
+ debug=1
+ LOG_LEVEL=$NDLP_DEBUG
+fi
+
+# -----------------------------------------------------------------------------
+# check for BASH v4+ (required for associative arrays)
+
+if [ ${BASH_VERSINFO[0]} -lt 4 ]; then
+ echo >&2 "BASH version 4 or later is required (this is ${BASH_VERSION})."
+ exit 1
+fi
+
+# -----------------------------------------------------------------------------
+# parse the arguments
+
+pid=
+cgroup=
+while [ -n "${1}" ]
+do
+ case "${1}" in
+ --cgroup) cgroup="${2}"; shift 1;;
+ --pid|-p) pid="${2}"; shift 1;;
+ --debug|debug)
+ debug=1
+ LOG_LEVEL=$NDLP_DEBUG
+ ;;
+ *) fatal "Cannot understand argument '${1}'";;
+ esac
+
+ shift
+done
+
+if [ -z "${pid}" ] && [ -z "${cgroup}" ]
+then
+ fatal "Either --pid or --cgroup is required"
+fi
+
+# -----------------------------------------------------------------------------
+
+set_source() {
+ [ ${debug} -eq 1 ] && echo "SRC ${*}"
+}
+
+
+# -----------------------------------------------------------------------------
+# veth interfaces via cgroup
+
+# cgroup-network can detect veth interfaces by itself (written in C).
+# If you seek for a shell version of what it does, check this:
+# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
+
+
+# -----------------------------------------------------------------------------
+# tun/tap interfaces via /proc/PID/fdinfo
+
+# find any tun/tap devices linked to a pid
+proc_pid_fdinfo_iff() {
+ local p="${1}" # the pid
+
+ debug "Searching for tun/tap interfaces for pid ${p}..."
+ set_source "fdinfo"
+ grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
+}
+
+find_tun_tap_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+ [ -d "${c}/emulator" ] && c="${c}/emulator" # check for 'emulator' subdirectory
+ c="${c}/cgroup.procs" # make full path
+
+ # for each pid of the cgroup
+ # find any tun/tap devices linked to the pid
+ if [ -f "${c}" ]
+ then
+ local p
+ for p in $(< "${c}" )
+ do
+ proc_pid_fdinfo_iff "${p}"
+ done
+ else
+ debug "Cannot find file '${c}', not searching for tun/tap interfaces."
+ fi
+}
+
+
+# -----------------------------------------------------------------------------
+# virsh domain network interfaces
+
+virsh_cgroup_to_domain_name() {
+ local c="${1}" # the cgroup path
+
+ debug "extracting a possible virsh domain from cgroup ${c}..."
+
+ # extract for the cgroup path
+ sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
+ -e "s|.*/machine/qemu-[0-9]\+-\(.*\)\.libvirt-qemu$|\1|p" \
+ -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
+ <<EOF
+${c}
+EOF
+}
+
+virsh_find_all_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ # the virsh command
+ local virsh
+ # shellcheck disable=SC2230
+ virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
+
+ if [ -n "${virsh}" ]
+ then
+ local d
+ d="$(virsh_cgroup_to_domain_name "${c}")"
+ # convert hex to character
+ # e.g.: vm01\x2dweb => vm01-web (https://github.com/netdata/netdata/issues/11088#issuecomment-832618149)
+ d="$(printf '%b' "${d}")"
+
+ if [ -n "${d}" ]
+ then
+ debug "running: virsh domiflist ${d}; to find the network interfaces"
+
+ # 'virsh -r domiflist <domain>' example output
+ # Interface Type Source Model MAC
+ #--------------------------------------------------------------
+ # vnet3 bridge br0 virtio 52:54:00:xx:xx:xx
+ # vnet4 network default virtio 52:54:00:yy:yy:yy
+
+ # match only 'network' interfaces from virsh output
+ set_source "virsh"
+ "${virsh}" -r domiflist "${d}" |\
+ sed -n \
+ -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
+ -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
+ else
+ debug "no virsh domain extracted from cgroup ${c}"
+ fi
+ else
+ debug "virsh command is not available"
+ fi
+}
+
+# -----------------------------------------------------------------------------
+# netnsid detected interfaces
+
+netnsid_find_all_interfaces_for_pid() {
+ local pid="${1}"
+ [ -z "${pid}" ] && return 1
+
+ local nsid
+ nsid=$(lsns -t net -p "${pid}" -o NETNSID -nr 2>/dev/null)
+ if [ -z "${nsid}" ] || [ "${nsid}" = "unassigned" ]; then
+ return 1
+ fi
+
+ set_source "netnsid"
+ ip link show |\
+ grep -B 1 -E " link-netnsid ${nsid}($| )" |\
+ sed -n -e "s|^[[:space:]]*[0-9]\+:[[:space:]]\+\([A-Za-z0-9_]\+\)\(@[A-Za-z0-9_]\+\)*:[[:space:]].*$|\1|p"
+}
+
+netnsid_find_all_interfaces_for_cgroup() {
+ local c="${1}" # the cgroup path
+
+ if [ -f "${c}/cgroup.procs" ]; then
+ netnsid_find_all_interfaces_for_pid "$(head -n 1 "${c}/cgroup.procs" 2>/dev/null)"
+ else
+ debug "Cannot find file '${c}/cgroup.procs', not searching for netnsid interfaces."
+ fi
+}
+
+# -----------------------------------------------------------------------------
+
+find_all_interfaces_of_pid_or_cgroup() {
+ local p="${1}" c="${2}" # the pid and the cgroup path
+
+ if [ -n "${pid}" ]
+ then
+ # we have been called with a pid
+
+ proc_pid_fdinfo_iff "${p}"
+ netnsid_find_all_interfaces_for_pid "${p}"
+
+ elif [ -n "${c}" ]
+ then
+ # we have been called with a cgroup
+
+ info "searching for network interfaces of cgroup '${c}'"
+
+ find_tun_tap_interfaces_for_cgroup "${c}"
+ virsh_find_all_interfaces_for_cgroup "${c}"
+ netnsid_find_all_interfaces_for_cgroup "${c}"
+
+ else
+
+ error "Either a pid or a cgroup path is needed"
+ return 1
+
+ fi
+
+ return 0
+}
+
+# -----------------------------------------------------------------------------
+
+# an associative array to store the interfaces
+# the index is the interface name as seen by the host
+# the value is the interface name as seen by the guest / container
+declare -A devs=()
+
+# store all interfaces found in the associative array
+# this will also give the unique devices, as seen by the host
+last_src=
+# shellcheck disable=SC2162
+while read host_device guest_device
+do
+ [ -z "${host_device}" ] && continue
+
+ [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
+
+ # the default guest_device is the host_device
+ [ -z "${guest_device}" ] && guest_device="${host_device}"
+
+ # when we run in debug, show the source
+ debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
+
+ if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
+ devs[${host_device}]="${guest_device}"
+ fi
+
+done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
+
+# print the interfaces found, in the format netdata expects them
+found=0
+for x in "${!devs[@]}"
+do
+ found=$((found + 1))
+ echo "${x} ${devs[${x}]}"
+done
+
+debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
+
+# let netdata know if we found any
+[ ${found} -eq 0 ] && exit 1
+exit 0
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
index b00f246bb..508ea07c6 100644
--- a/collectors/cgroups.plugin/cgroup-network.c
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -10,12 +10,16 @@
#include <sched.h>
#endif
-char environment_variable2[FILENAME_MAX + 50] = "";
-char environment_variable3[FILENAME_MAX + 50] = "";
+char env_netdata_host_prefix[FILENAME_MAX + 50] = "";
+char env_netdata_log_method[FILENAME_MAX + 50] = "";
+char env_netdata_log_format[FILENAME_MAX + 50] = "";
+char env_netdata_log_level[FILENAME_MAX + 50] = "";
char *environment[] = {
"PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
- environment_variable2,
- environment_variable3,
+ env_netdata_host_prefix,
+ env_netdata_log_method,
+ env_netdata_log_format,
+ env_netdata_log_level,
NULL
};
@@ -288,7 +292,8 @@ int switch_namespace(const char *prefix, pid_t pid) {
pid_t read_pid_from_cgroup_file(const char *filename) {
int fd = open(filename, procfile_open_flags);
if(fd == -1) {
- collector_error("Cannot open pid_from_cgroup() file '%s'.", filename);
+ if (errno != ENOENT)
+ collector_error("Cannot open pid_from_cgroup() file '%s'.", filename);
return 0;
}
@@ -648,12 +653,11 @@ void usage(void) {
}
int main(int argc, char **argv) {
- stderror = stderr;
pid_t pid = 0;
- program_name = argv[0];
program_version = VERSION;
- error_log_syslog = 0;
+ clocks_init();
+ nd_log_initialize_for_external_plugins("cgroup-network");
// since cgroup-network runs as root, prevent it from opening symbolic links
procfile_open_flags = O_RDONLY|O_NOFOLLOW;
@@ -662,7 +666,7 @@ int main(int argc, char **argv) {
// make sure NETDATA_HOST_PREFIX is safe
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix() == -1) exit(1);
+ if(verify_netdata_host_prefix(false) == -1) exit(1);
if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
@@ -671,11 +675,20 @@ int main(int argc, char **argv) {
// build a safe environment for our script
// the first environment variable is a fixed PATH=
- snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
+ snprintfz(env_netdata_host_prefix, sizeof(env_netdata_host_prefix) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
- char *s = getenv("NETDATA_LOG_SEVERITY_LEVEL");
+ char *s;
+
+ s = getenv("NETDATA_LOG_METHOD");
+ snprintfz(env_netdata_log_method, sizeof(env_netdata_log_method) - 1, "NETDATA_LOG_METHOD=%s", nd_log_method_for_external_plugins(s));
+
+ s = getenv("NETDATA_LOG_FORMAT");
+ if (s)
+ snprintfz(env_netdata_log_format, sizeof(env_netdata_log_format) - 1, "NETDATA_LOG_FORMAT=%s", s);
+
+ s = getenv("NETDATA_LOG_LEVEL");
if (s)
- snprintfz(environment_variable3, sizeof(environment_variable3) - 1, "NETDATA_LOG_SEVERITY_LEVEL=%s", s);
+ snprintfz(env_netdata_log_level, sizeof(env_netdata_log_level) - 1, "NETDATA_LOG_LEVEL=%s", s);
// ------------------------------------------------------------------------
@@ -686,8 +699,6 @@ int main(int argc, char **argv) {
if(argc != 3)
usage();
-
- log_set_global_severity_for_external_plugins();
int arg = 1;
int helper = 1;
diff --git a/collectors/cgroups.plugin/cgroup-top.c b/collectors/cgroups.plugin/cgroup-top.c
new file mode 100644
index 000000000..8d44d3b56
--- /dev/null
+++ b/collectors/cgroups.plugin/cgroup-top.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "cgroup-internals.h"
+
+struct cgroup_netdev_link {
+ size_t read_slot;
+ NETDATA_DOUBLE received[2];
+ NETDATA_DOUBLE sent[2];
+};
+
+static DICTIONARY *cgroup_netdev_link_dict = NULL;
+
+void cgroup_netdev_link_init(void) {
+ cgroup_netdev_link_dict = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE|DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct cgroup_netdev_link));
+}
+
+const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg) {
+ if(!cg->cgroup_netdev_link) {
+ struct cgroup_netdev_link t = {
+ .read_slot = 0,
+ .received = {NAN, NAN},
+ .sent = {NAN, NAN},
+ };
+
+ cg->cgroup_netdev_link =
+ dictionary_set_and_acquire_item(cgroup_netdev_link_dict, cg->id, &t, sizeof(struct cgroup_netdev_link));
+ }
+
+ return dictionary_acquired_item_dup(cgroup_netdev_link_dict, cg->cgroup_netdev_link);
+}
+
+void cgroup_netdev_delete(struct cgroup *cg) {
+ if(cg->cgroup_netdev_link) {
+ dictionary_acquired_item_release(cgroup_netdev_link_dict, cg->cgroup_netdev_link);
+ dictionary_del(cgroup_netdev_link_dict, cg->id);
+ dictionary_garbage_collect(cgroup_netdev_link_dict);
+ cg->cgroup_netdev_link = NULL;
+ }
+}
+
+void cgroup_netdev_release(const DICTIONARY_ITEM *link) {
+ if(link)
+ dictionary_acquired_item_release(cgroup_netdev_link_dict, link);
+}
+
+const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link) {
+ return dictionary_acquired_item_dup(cgroup_netdev_link_dict, link);
+}
+
+void cgroup_netdev_reset_all(void) {
+ struct cgroup_netdev_link *t;
+ dfe_start_read(cgroup_netdev_link_dict, t) {
+ if(t->read_slot >= 1) {
+ t->read_slot = 0;
+ t->received[1] = NAN;
+ t->sent[1] = NAN;
+ }
+ else {
+ t->read_slot = 1;
+ t->received[0] = NAN;
+ t->sent[0] = NAN;
+ }
+ }
+ dfe_done(t);
+}
+
+void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent) {
+ if(!link)
+ return;
+
+ struct cgroup_netdev_link *t = dictionary_acquired_item_value(link);
+
+ size_t slot = (t->read_slot) ? 0 : 1;
+
+ if(isnan(t->received[slot]))
+ t->received[slot] = received;
+ else
+ t->received[slot] += received;
+
+ if(isnan(t->sent[slot]))
+ t->sent[slot] = sent;
+ else
+ t->sent[slot] += sent;
+}
+
+void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NETDATA_DOUBLE *sent) {
+ if(!cg->cgroup_netdev_link) {
+ *received = NAN;
+ *sent = NAN;
+ return;
+ }
+
+ struct cgroup_netdev_link *t = dictionary_acquired_item_value(cg->cgroup_netdev_link);
+
+ size_t slot = (t->read_slot) ? 1 : 0;
+
+ *received = t->received[slot];
+ *sent = t->sent[slot];
+}
+
+int cgroup_function_cgroup_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
+ void *collector_data __maybe_unused,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
+ void *register_canceller_cb_data __maybe_unused) {
+
+ buffer_flush(wb);
+ wb->content_type = CT_APPLICATION_JSON;
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
+
+ buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
+ buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
+ buffer_json_member_add_string(wb, "type", "table");
+ buffer_json_member_add_time_t(wb, "update_every", 1);
+ buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP);
+ buffer_json_member_add_array(wb, "data");
+
+ double max_pids = 0.0;
+ double max_cpu = 0.0;
+ double max_ram = 0.0;
+ double max_disk_io_read = 0.0;
+ double max_disk_io_written = 0.0;
+ double max_net_received = 0.0;
+ double max_net_sent = 0.0;
+
+ RRDDIM *rd = NULL;
+
+ uv_mutex_lock(&cgroup_root_mutex);
+
+ for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || is_cgroup_systemd_service(cg)))
+ continue;
+
+ buffer_json_add_array_item_array(wb);
+
+ buffer_json_add_array_item_string(wb, cg->name); // Name
+
+ if(k8s_is_kubepod(cg))
+ buffer_json_add_array_item_string(wb, "k8s"); // Kind
+ else
+ buffer_json_add_array_item_string(wb, "cgroup"); // Kind
+
+ double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0);
+
+ double cpu = NAN;
+ if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) {
+ cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value;
+ max_cpu = MAX(max_cpu, cpu);
+ }
+
+ double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0);
+
+ rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read;
+ double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0);
+ rd = cg->st_throttle_io_rd_written ? cg->st_throttle_io_rd_written : cg->st_io_rd_written;
+ double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0);
+
+ NETDATA_DOUBLE received, sent;
+ cgroup_netdev_get_bandwidth(cg, &received, &sent);
+ if (!isnan(received) && !isnan(sent)) {
+ received /= 1000.0;
+ sent /= 1000.0;
+ max_net_received = MAX(max_net_received, received);
+ max_net_sent = MAX(max_net_sent, sent);
+ }
+
+ buffer_json_add_array_item_double(wb, pids_current);
+ buffer_json_add_array_item_double(wb, cpu);
+ buffer_json_add_array_item_double(wb, ram);
+ buffer_json_add_array_item_double(wb, disk_io_read);
+ buffer_json_add_array_item_double(wb, disk_io_written);
+ buffer_json_add_array_item_double(wb, received);
+ buffer_json_add_array_item_double(wb, sent);
+
+ buffer_json_array_close(wb);
+ }
+
+ uv_mutex_unlock(&cgroup_root_mutex);
+
+ buffer_json_array_close(wb); // data
+ buffer_json_member_add_object(wb, "columns");
+ {
+ size_t field_id = 0;
+
+ // Node
+ buffer_rrdf_table_add_field(wb, field_id++, "Name", "CGROUP Name",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
+ NULL);
+
+ // Kind
+ buffer_rrdf_table_add_field(wb, field_id++, "Kind", "CGROUP Kind",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // PIDs
+ buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP",
+ RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
+ 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // CPU
+ buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // RAM
+ buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Disk IO Reads
+ buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Disk IO Writes
+ buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Network Received
+ buffer_rrdf_table_add_field(wb, field_id++, "Received", "Network Traffic Received",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Mbps", max_net_received, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Network Sent
+ buffer_rrdf_table_add_field(wb, field_id++, "Sent", "Network Traffic Sent ",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Mbps", max_net_sent, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ }
+ buffer_json_object_close(wb); // columns
+ buffer_json_member_add_string(wb, "default_sort_column", "CPU");
+
+ buffer_json_member_add_object(wb, "charts");
+ {
+ buffer_json_member_add_object(wb, "CPU");
+ {
+ buffer_json_member_add_string(wb, "name", "CPU");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "CPU");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "Memory");
+ {
+ buffer_json_member_add_string(wb, "name", "Memory");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "RAM");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "Traffic");
+ {
+ buffer_json_member_add_string(wb, "name", "Traffic");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Received");
+ buffer_json_add_array_item_string(wb, "Sent");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // charts
+
+ buffer_json_member_add_array(wb, "default_charts");
+ {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "CPU");
+ buffer_json_add_array_item_string(wb, "Name");
+ buffer_json_array_close(wb);
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "Memory");
+ buffer_json_add_array_item_string(wb, "Name");
+ buffer_json_array_close(wb);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_object(wb, "group_by");
+ {
+ buffer_json_member_add_object(wb, "Kind");
+ {
+ buffer_json_member_add_string(wb, "name", "Kind");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Kind");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // group_by
+
+ buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
+ buffer_json_finalize(wb);
+
+ int response = HTTP_RESP_OK;
+ if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
+ buffer_flush(wb);
+ response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
+ }
+
+ if(result_cb)
+ result_cb(wb, response, result_cb_data);
+
+ return response;
+}
+
+int cgroup_function_systemd_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
+ void *collector_data __maybe_unused,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
+ void *register_canceller_cb_data __maybe_unused) {
+
+ buffer_flush(wb);
+ wb->content_type = CT_APPLICATION_JSON;
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
+
+ buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
+ buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
+ buffer_json_member_add_string(wb, "type", "table");
+ buffer_json_member_add_time_t(wb, "update_every", 1);
+ buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP);
+ buffer_json_member_add_array(wb, "data");
+
+ double max_pids = 0.0;
+ double max_cpu = 0.0;
+ double max_ram = 0.0;
+ double max_disk_io_read = 0.0;
+ double max_disk_io_written = 0.0;
+
+ RRDDIM *rd = NULL;
+
+ uv_mutex_lock(&cgroup_root_mutex);
+
+ for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) {
+ if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || !is_cgroup_systemd_service(cg)))
+ continue;
+
+ buffer_json_add_array_item_array(wb);
+
+ buffer_json_add_array_item_string(wb, cg->name);
+
+ double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0);
+
+ double cpu = NAN;
+ if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) {
+ cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value;
+ max_cpu = MAX(max_cpu, cpu);
+ }
+
+ double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0);
+
+ rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read;
+ double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0);
+ rd = cg->st_throttle_io_rd_written ? cg->st_throttle_io_rd_written : cg->st_io_rd_written;
+ double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0);
+
+ buffer_json_add_array_item_double(wb, pids_current);
+ buffer_json_add_array_item_double(wb, cpu);
+ buffer_json_add_array_item_double(wb, ram);
+ buffer_json_add_array_item_double(wb, disk_io_read);
+ buffer_json_add_array_item_double(wb, disk_io_written);
+
+ buffer_json_array_close(wb);
+ }
+
+ uv_mutex_unlock(&cgroup_root_mutex);
+
+ buffer_json_array_close(wb); // data
+ buffer_json_member_add_object(wb, "columns");
+ {
+ size_t field_id = 0;
+
+ // Node
+ buffer_rrdf_table_add_field(wb, field_id++, "Name", "Systemd Service Name",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
+ NULL);
+
+ // PIDs
+ buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP",
+ RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
+ 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // CPU
+ buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // RAM
+ buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Disk IO Reads
+ buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ // Disk IO Writes
+ buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ }
+
+ buffer_json_object_close(wb); // columns
+ buffer_json_member_add_string(wb, "default_sort_column", "CPU");
+
+ buffer_json_member_add_object(wb, "charts");
+ {
+ buffer_json_member_add_object(wb, "CPU");
+ {
+ buffer_json_member_add_string(wb, "name", "CPU");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "CPU");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "Memory");
+ {
+ buffer_json_member_add_string(wb, "name", "Memory");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "RAM");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // charts
+
+ buffer_json_member_add_array(wb, "default_charts");
+ {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "CPU");
+ buffer_json_add_array_item_string(wb, "Name");
+ buffer_json_array_close(wb);
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "Memory");
+ buffer_json_add_array_item_string(wb, "Name");
+ buffer_json_array_close(wb);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
+ buffer_json_finalize(wb);
+
+ int response = HTTP_RESP_OK;
+ if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
+ buffer_flush(wb);
+ response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
+ }
+
+ if(result_cb)
+ result_cb(wb, response, result_cb_data);
+
+ return response;
+}
diff --git a/collectors/cgroups.plugin/integrations/containers.md b/collectors/cgroups.plugin/integrations/containers.md
index 6dec9ce2b..6273d1e91 100644
--- a/collectors/cgroups.plugin/integrations/containers.md
+++ b/collectors/cgroups.plugin/integrations/containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/kubernetes_containers.md b/collectors/cgroups.plugin/integrations/kubernetes_containers.md
index 4bfa55c6d..9be32a12a 100644
--- a/collectors/cgroups.plugin/integrations/kubernetes_containers.md
+++ b/collectors/cgroups.plugin/integrations/kubernetes_containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Kubernetes Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Kubernetes"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -54,23 +55,21 @@ The scope defines the instance that the metric belongs to. An instance is unique
### Per k8s cgroup
-
+These metrics refer to the Pod container.
Labels:
| Label | Description |
|:-----------|:----------------|
-| k8s_namespace | TBD |
-| k8s_pod_name | TBD |
-| k8s_pod_uid | TBD |
-| k8s_controller_kind | TBD |
-| k8s_controller_name | TBD |
-| k8s_node_name | TBD |
-| k8s_container_name | TBD |
-| k8s_container_id | TBD |
-| k8s_kind | TBD |
-| k8s_qos_class | TBD |
-| k8s_cluster_id | TBD |
+| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |
+| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |
+| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |
+| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |
+| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |
+| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |
+| k8s_kind | Instance kind: "pod" or "container". |
+| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |
+| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |
Metrics:
@@ -108,28 +107,28 @@ Metrics:
| k8s.cgroup.io_some_pressure_stall_time | time | ms |
| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |
| k8s.cgroup.io_full_pressure_stall_time | time | ms |
+| k8s.cgroup.pids_current | pids | pids |
### Per k8s cgroup network device
-
+These metrics refer to the Pod container network interface.
Labels:
| Label | Description |
|:-----------|:----------------|
-| device | TBD |
-| interface_type | TBD |
-| k8s_namespace | TBD |
-| k8s_pod_name | TBD |
-| k8s_pod_uid | TBD |
-| k8s_controller_kind | TBD |
-| k8s_controller_name | TBD |
-| k8s_node_name | TBD |
-| k8s_container_name | TBD |
-| k8s_container_id | TBD |
-| k8s_kind | TBD |
-| k8s_qos_class | TBD |
-| k8s_cluster_id | TBD |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
+| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |
+| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |
+| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |
+| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |
+| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |
+| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |
+| k8s_kind | Instance kind: "pod" or "container". |
+| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |
+| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/libvirt_containers.md b/collectors/cgroups.plugin/integrations/libvirt_containers.md
index af0310b10..fed454698 100644
--- a/collectors/cgroups.plugin/integrations/libvirt_containers.md
+++ b/collectors/cgroups.plugin/integrations/libvirt_containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Libvirt Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/lxc_containers.md b/collectors/cgroups.plugin/integrations/lxc_containers.md
index becc9ae17..3f05ffd5f 100644
--- a/collectors/cgroups.plugin/integrations/lxc_containers.md
+++ b/collectors/cgroups.plugin/integrations/lxc_containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "LXC Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/ovirt_containers.md b/collectors/cgroups.plugin/integrations/ovirt_containers.md
index c9f6d74b7..5771aeea1 100644
--- a/collectors/cgroups.plugin/integrations/ovirt_containers.md
+++ b/collectors/cgroups.plugin/integrations/ovirt_containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "oVirt Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/proxmox_containers.md b/collectors/cgroups.plugin/integrations/proxmox_containers.md
index 2caad5eac..1804a40ca 100644
--- a/collectors/cgroups.plugin/integrations/proxmox_containers.md
+++ b/collectors/cgroups.plugin/integrations/proxmox_containers.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Proxmox Containers"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/integrations/systemd_services.md b/collectors/cgroups.plugin/integrations/systemd_services.md
index b71060050..0ce906366 100644
--- a/collectors/cgroups.plugin/integrations/systemd_services.md
+++ b/collectors/cgroups.plugin/integrations/systemd_services.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Systemd Services"
learn_status: "Published"
learn_rel_path: "Data Collection/Systemd"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -79,6 +80,7 @@ Metrics:
| systemd.service.disk.throttle.iops | read, write | operations/s |
| systemd.service.disk.queued_iops | read, write | operations/s |
| systemd.service.disk.merged_iops | read, write | operations/s |
+| systemd.service.pids.current | pids | pids |
diff --git a/collectors/cgroups.plugin/integrations/virtual_machines.md b/collectors/cgroups.plugin/integrations/virtual_machines.md
index 3bb79c128..6a64923c4 100644
--- a/collectors/cgroups.plugin/integrations/virtual_machines.md
+++ b/collectors/cgroups.plugin/integrations/virtual_machines.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.pl
sidebar_label: "Virtual Machines"
learn_status: "Published"
learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: True
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -60,8 +61,8 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
Metrics:
@@ -99,6 +100,7 @@ Metrics:
| cgroup.io_some_pressure_stall_time | time | ms |
| cgroup.io_full_pressure | some10, some60, some300 | percentage |
| cgroup.io_full_pressure_stall_time | time | ms |
+| cgroup.pids_current | pids | pids |
### Per cgroup network device
@@ -108,10 +110,11 @@ Labels:
| Label | Description |
|:-----------|:----------------|
-| container_name | TBD |
-| image | TBD |
-| device | TBD |
-| interface_type | TBD |
+| container_name | The container name or group path if name resolution fails. |
+| image | Docker/Podman container image name. |
+| device | The name of the host network interface linked to the container's network interface. |
+| container_device | Container network interface name. |
+| interface_type | Network interface type. Always "virtual" for the containers. |
Metrics:
diff --git a/collectors/cgroups.plugin/metadata.yaml b/collectors/cgroups.plugin/metadata.yaml
index ec6228ea2..a1abbb5a9 100644
--- a/collectors/cgroups.plugin/metadata.yaml
+++ b/collectors/cgroups.plugin/metadata.yaml
@@ -86,9 +86,9 @@ modules:
description: ""
labels:
- name: container_name
- description: TBD
+ description: The container name or group path if name resolution fails.
- name: image
- description: TBD
+ description: Docker/Podman container image name.
metrics:
- name: cgroup.cpu_limit
description: CPU Usage within the limits
@@ -310,17 +310,25 @@ modules:
chart_type: line
dimensions:
- name: time
+ - name: cgroup.pids_current
+ description: Number of processes
+ unit: "pids"
+ chart_type: line
+ dimensions:
+ - name: pids
- name: cgroup network device
description: ""
labels:
- name: container_name
- description: TBD
+ description: The container name or group path if name resolution fails.
- name: image
- description: TBD
+ description: Docker/Podman container image name.
- name: device
- description: TBD
+ description: "The name of the host network interface linked to the container's network interface."
+ - name: container_device
+ description: Container network interface name.
- name: interface_type
- description: TBD
+ description: 'Network interface type. Always "virtual" for the containers.'
metrics:
- name: cgroup.net_net
description: Bandwidth
@@ -445,30 +453,26 @@ modules:
availability: []
scopes:
- name: k8s cgroup
- description: ""
+ description: These metrics refer to the Pod container.
labels:
+ - name: k8s_node_name
+ description: 'Node name. The value of _pod.spec.nodeName_.'
- name: k8s_namespace
- description: TBD
- - name: k8s_pod_name
- description: TBD
- - name: k8s_pod_uid
- description: TBD
+ description: 'Namespace name. The value of _pod.metadata.namespace_.'
- name: k8s_controller_kind
- description: TBD
+ description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.'
- name: k8s_controller_name
- description: TBD
- - name: k8s_node_name
- description: TBD
+        description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.'
+ - name: k8s_pod_name
+ description: 'Pod name. The value of _pod.metadata.name_.'
- name: k8s_container_name
- description: TBD
- - name: k8s_container_id
- description: TBD
+ description: 'Container name. The value of _pod.spec.containers.name_.'
- name: k8s_kind
- description: TBD
+ description: 'Instance kind: "pod" or "container".'
- name: k8s_qos_class
- description: TBD
+ description: 'QoS class (guaranteed, burstable, besteffort).'
- name: k8s_cluster_id
- description: TBD
+ description: 'Cluster ID. The value of kube-system namespace _namespace.metadata.uid_.'
metrics:
- name: k8s.cgroup.cpu_limit
description: CPU Usage within the limits
@@ -690,35 +694,39 @@ modules:
chart_type: line
dimensions:
- name: time
+ - name: k8s.cgroup.pids_current
+ description: Number of processes
+ unit: "pids"
+ chart_type: line
+ dimensions:
+ - name: pids
- name: k8s cgroup network device
- description: ""
+ description: These metrics refer to the Pod container network interface.
labels:
- name: device
- description: TBD
+ description: "The name of the host network interface linked to the container's network interface."
+ - name: container_device
+ description: Container network interface name.
- name: interface_type
- description: TBD
+ description: 'Network interface type. Always "virtual" for the containers.'
+ - name: k8s_node_name
+ description: 'Node name. The value of _pod.spec.nodeName_.'
- name: k8s_namespace
- description: TBD
- - name: k8s_pod_name
- description: TBD
- - name: k8s_pod_uid
- description: TBD
+ description: 'Namespace name. The value of _pod.metadata.namespace_.'
- name: k8s_controller_kind
- description: TBD
+ description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.'
- name: k8s_controller_name
- description: TBD
- - name: k8s_node_name
- description: TBD
+        description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.'
+ - name: k8s_pod_name
+ description: 'Pod name. The value of _pod.metadata.name_.'
- name: k8s_container_name
- description: TBD
- - name: k8s_container_id
- description: TBD
+ description: 'Container name. The value of _pod.spec.containers.name_.'
- name: k8s_kind
- description: TBD
+ description: 'Instance kind: "pod" or "container".'
- name: k8s_qos_class
- description: TBD
+ description: 'QoS class (guaranteed, burstable, besteffort).'
- name: k8s_cluster_id
- description: TBD
+ description: 'Cluster ID. The value of kube-system namespace _namespace.metadata.uid_.'
metrics:
- name: k8s.cgroup.net_net
description: Bandwidth
@@ -919,6 +927,12 @@ modules:
dimensions:
- name: read
- name: write
+ - name: systemd.service.pids.current
+ description: Systemd Services Number of Processes
+ unit: pids
+ chart_type: line
+ dimensions:
+ - name: pids
- <<: *module
meta:
<<: *meta
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index 6196e7603..705edf6f7 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -1,149 +1,98 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-#include "sys_fs_cgroup.h"
-
-#define PLUGIN_CGROUPS_NAME "cgroups.plugin"
-#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd"
-#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup"
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_DEFAULT
-#else
-#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_NO_ERROR_ON_FILE_IO
-#endif
+#include "cgroup-internals.h"
// main cgroups thread worker jobs
#define WORKER_CGROUPS_LOCK 0
#define WORKER_CGROUPS_READ 1
#define WORKER_CGROUPS_CHART 2
-// discovery cgroup thread worker jobs
-#define WORKER_DISCOVERY_INIT 0
-#define WORKER_DISCOVERY_FIND 1
-#define WORKER_DISCOVERY_PROCESS 2
-#define WORKER_DISCOVERY_PROCESS_RENAME 3
-#define WORKER_DISCOVERY_PROCESS_NETWORK 4
-#define WORKER_DISCOVERY_PROCESS_FIRST_TIME 5
-#define WORKER_DISCOVERY_UPDATE 6
-#define WORKER_DISCOVERY_CLEANUP 7
-#define WORKER_DISCOVERY_COPY 8
-#define WORKER_DISCOVERY_SHARE 9
-#define WORKER_DISCOVERY_LOCK 10
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 11
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 11
-#endif
-
// ----------------------------------------------------------------------------
// cgroup globals
+unsigned long long host_ram_total = 0;
+int is_inside_k8s = 0;
+long system_page_size = 4096; // system will be queried via sysconf() in configuration()
+int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
+int cgroup_enable_cpuacct_cpu_throttling = CONFIG_BOOLEAN_YES;
+int cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_NO;
+int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO;
+int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO;
+int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
+int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
+int cgroup_used_memory = CONFIG_BOOLEAN_YES;
+int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO;
+int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO;
+int cgroup_search_in_devices = 1;
+int cgroup_check_for_new_every = 10;
+int cgroup_update_every = 1;
+int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
+int cgroup_recheck_zero_blkio_every_iterations = 10;
+int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
+int cgroup_recheck_zero_mem_detailed_every_iterations = 10;
+char *cgroup_cpuacct_base = NULL;
+char *cgroup_cpuset_base = NULL;
+char *cgroup_blkio_base = NULL;
+char *cgroup_memory_base = NULL;
+char *cgroup_devices_base = NULL;
+char *cgroup_pids_base = NULL;
+char *cgroup_unified_base = NULL;
+int cgroup_root_count = 0;
+int cgroup_root_max = 1000;
+int cgroup_max_depth = 0;
+SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
+SIMPLE_PATTERN *enabled_cgroup_names = NULL;
+SIMPLE_PATTERN *search_cgroup_paths = NULL;
+SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
+SIMPLE_PATTERN *systemd_services_cgroups = NULL;
+SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL;
+char *cgroups_network_interface_script = NULL;
+int cgroups_check = 0;
+uint32_t Read_hash = 0;
+uint32_t Write_hash = 0;
+uint32_t user_hash = 0;
+uint32_t system_hash = 0;
+uint32_t user_usec_hash = 0;
+uint32_t system_usec_hash = 0;
+uint32_t nr_periods_hash = 0;
+uint32_t nr_throttled_hash = 0;
+uint32_t throttled_time_hash = 0;
+uint32_t throttled_usec_hash = 0;
-static char cgroup_chart_id_prefix[] = "cgroup_";
-static char services_chart_id_prefix[] = "systemd_";
-
-static int is_inside_k8s = 0;
-
-static long system_page_size = 4096; // system will be queried via sysconf() in configuration()
-
-static int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_cpuacct_cpu_throttling = CONFIG_BOOLEAN_YES;
-static int cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_NO;
-static int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
-static int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO;
-static int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO;
-
-static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
-static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
-static int cgroup_used_memory = CONFIG_BOOLEAN_YES;
-
-static int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO;
-static int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO;
-
-static int cgroup_search_in_devices = 1;
-
-static int cgroup_check_for_new_every = 10;
-static int cgroup_update_every = 1;
-static int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
-
-static int cgroup_recheck_zero_blkio_every_iterations = 10;
-static int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
-static int cgroup_recheck_zero_mem_detailed_every_iterations = 10;
-
-static char *cgroup_cpuacct_base = NULL;
-static char *cgroup_cpuset_base = NULL;
-static char *cgroup_blkio_base = NULL;
-static char *cgroup_memory_base = NULL;
-static char *cgroup_devices_base = NULL;
-static char *cgroup_unified_base = NULL;
-
-static int cgroup_root_count = 0;
-static int cgroup_root_max = 1000;
-static int cgroup_max_depth = 0;
-
-static SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
-static SIMPLE_PATTERN *enabled_cgroup_names = NULL;
-static SIMPLE_PATTERN *search_cgroup_paths = NULL;
-static SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
-static SIMPLE_PATTERN *systemd_services_cgroups = NULL;
-
-static SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL;
-
-static char *cgroups_rename_script = NULL;
-static char *cgroups_network_interface_script = NULL;
-
-static int cgroups_check = 0;
-
-static uint32_t Read_hash = 0;
-static uint32_t Write_hash = 0;
-static uint32_t user_hash = 0;
-static uint32_t system_hash = 0;
-static uint32_t user_usec_hash = 0;
-static uint32_t system_usec_hash = 0;
-static uint32_t nr_periods_hash = 0;
-static uint32_t nr_throttled_hash = 0;
-static uint32_t throttled_time_hash = 0;
-static uint32_t throttled_usec_hash = 0;
-
-enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 };
-
-enum cgroups_systemd_setting {
- SYSTEMD_CGROUP_ERR,
- SYSTEMD_CGROUP_LEGACY,
- SYSTEMD_CGROUP_HYBRID,
- SYSTEMD_CGROUP_UNIFIED
-};
-
-struct cgroups_systemd_config_setting {
- char *name;
- enum cgroups_systemd_setting setting;
-};
+// *** WARNING *** The fields are not thread safe. Take care of safe usage.
+struct cgroup *cgroup_root = NULL;
+uv_mutex_t cgroup_root_mutex;
-static struct cgroups_systemd_config_setting cgroups_systemd_options[] = {
- { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY },
- { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID },
- { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED },
- { .name = NULL, .setting = SYSTEMD_CGROUP_ERR },
+struct cgroups_systemd_config_setting cgroups_systemd_options[] = {
+ { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY },
+ { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID },
+ { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED },
+ { .name = NULL, .setting = SYSTEMD_CGROUP_ERR },
};
// Shared memory with information from detected cgroups
netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL};
-static int shm_fd_cgroup_ebpf = -1;
+int shm_fd_cgroup_ebpf = -1;
sem_t *shm_mutex_cgroup_ebpf = SEM_FAILED;
+struct discovery_thread discovery_thread;
+
+
/* on Fed systemd is not in PATH for some reason */
#define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version"
#define SYSTEMD_HIERARCHY_STRING "default-hierarchy="
@@ -362,54 +311,70 @@ void read_cgroup_plugin_configuration() {
cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_NO;
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
- if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
- if(!mi) {
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
+ if (!mi) {
collector_error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
s = "/sys/fs/cgroup/cpuacct";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
set_cgroup_base_path(filename, s);
cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename);
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuset");
- if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset");
- if(!mi) {
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset");
+ if (!mi) {
collector_error("CGROUP: cannot find cpuset mountinfo. Assuming default: /sys/fs/cgroup/cpuset");
s = "/sys/fs/cgroup/cpuset";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
set_cgroup_base_path(filename, s);
cgroup_cpuset_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuset", filename);
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
- if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
- if(!mi) {
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
+ if (!mi) {
collector_error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
s = "/sys/fs/cgroup/blkio";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
set_cgroup_base_path(filename, s);
cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename);
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
- if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
- if(!mi) {
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
+ if (!mi) {
collector_error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
s = "/sys/fs/cgroup/memory";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
set_cgroup_base_path(filename, s);
cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename);
mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
- if(!mi) mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
- if(!mi) {
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
+ if (!mi) {
collector_error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
s = "/sys/fs/cgroup/devices";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
set_cgroup_base_path(filename, s);
cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename);
+
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "pids");
+ if (!mi)
+ mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "pids");
+ if (!mi) {
+ collector_error("CGROUP: cannot find pids mountinfo. Assuming default: /sys/fs/cgroup/pids");
+ s = "/sys/fs/cgroup/pids";
+ } else
+ s = mi->mount_point;
+ set_cgroup_base_path(filename, s);
+ cgroup_pids_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/pids", filename);
}
else {
//cgroup_enable_cpuacct_stat =
@@ -429,22 +394,19 @@ void read_cgroup_plugin_configuration() {
cgroup_used_memory = CONFIG_BOOLEAN_NO; //unified cgroups use different values
//TODO: can there be more than 1 cgroup2 mount point?
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw"); //there is no cgroup2 specific super option - for now use 'rw' option
- if(mi)
- netdata_log_debug(D_CGROUP, "found unified cgroup root using super options, with path: '%s'", mi->mount_point);
- if(!mi) {
+ //there is no cgroup2 specific super option - for now use 'rw' option
+ mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw");
+ if (!mi) {
mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup2", "cgroup");
- if(mi)
- netdata_log_debug(D_CGROUP, "found unified cgroup root using mountsource info, with path: '%s'", mi->mount_point);
}
- if(!mi) {
+ if (!mi) {
collector_error("CGROUP: cannot find cgroup2 mountinfo. Assuming default: /sys/fs/cgroup");
s = "/sys/fs/cgroup";
- }
- else s = mi->mount_point;
+ } else
+ s = mi->mount_point;
+
set_cgroup_base_path(filename, s);
cgroup_unified_base = config_get("plugin:cgroups", "path to unified cgroups", filename);
- netdata_log_debug(D_CGROUP, "using cgroup root: '%s'", cgroup_unified_base);
}
cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max);
@@ -621,395 +583,6 @@ end_init_shm:
shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
}
-// ----------------------------------------------------------------------------
-// cgroup objects
-
-struct blkio {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int delay_counter;
-
- char *filename;
-
- unsigned long long Read;
- unsigned long long Write;
-/*
- unsigned long long Sync;
- unsigned long long Async;
- unsigned long long Total;
-*/
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
-struct memory {
- ARL_BASE *arl_base;
- ARL_ENTRY *arl_dirty;
- ARL_ENTRY *arl_swap;
-
- int updated_detailed;
- int updated_usage_in_bytes;
- int updated_msw_usage_in_bytes;
- int updated_failcnt;
-
- int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- int delay_counter_detailed;
- int delay_counter_failcnt;
-
- char *filename_detailed;
- char *filename_usage_in_bytes;
- char *filename_msw_usage_in_bytes;
- char *filename_failcnt;
-
- int detailed_has_dirty;
- int detailed_has_swap;
-
- // detailed metrics
-/*
- unsigned long long cache;
- unsigned long long rss;
- unsigned long long rss_huge;
- unsigned long long mapped_file;
- unsigned long long writeback;
- unsigned long long dirty;
- unsigned long long swap;
- unsigned long long pgpgin;
- unsigned long long pgpgout;
- unsigned long long pgfault;
- unsigned long long pgmajfault;
- unsigned long long inactive_anon;
- unsigned long long active_anon;
- unsigned long long inactive_file;
- unsigned long long active_file;
- unsigned long long unevictable;
- unsigned long long hierarchical_memory_limit;
-*/
- //unified cgroups metrics
- unsigned long long anon;
- unsigned long long kernel_stack;
- unsigned long long slab;
- unsigned long long sock;
- unsigned long long shmem;
- unsigned long long anon_thp;
- //unsigned long long file_writeback;
- //unsigned long long file_dirty;
- //unsigned long long file;
-
- unsigned long long total_cache;
- unsigned long long total_rss;
- unsigned long long total_rss_huge;
- unsigned long long total_mapped_file;
- unsigned long long total_writeback;
- unsigned long long total_dirty;
- unsigned long long total_swap;
- unsigned long long total_pgpgin;
- unsigned long long total_pgpgout;
- unsigned long long total_pgfault;
- unsigned long long total_pgmajfault;
-/*
- unsigned long long total_inactive_anon;
- unsigned long long total_active_anon;
-*/
-
- unsigned long long total_inactive_file;
-
-/*
- unsigned long long total_active_file;
- unsigned long long total_unevictable;
-*/
-
- // single file metrics
- unsigned long long usage_in_bytes;
- unsigned long long msw_usage_in_bytes;
- unsigned long long failcnt;
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
-struct cpuacct_stat {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long user; // v1, v2(user_usec)
- unsigned long long system; // v1, v2(system_usec)
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
-struct cpuacct_usage {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned int cpus;
- unsigned long long *cpu_percpu;
-};
-
-// represents cpuacct/cpu.stat, for v2 'cpuacct_stat' is used for 'user_usec', 'system_usec'
-struct cpuacct_cpu_throttling {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long nr_periods;
- unsigned long long nr_throttled;
- unsigned long long throttled_time;
-
- unsigned long long nr_throttled_perc;
-};
-
-// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu#sect-cfs
-// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications_managing-monitoring-and-updating-the-kernel#proc_controlling-distribution-of-cpu-time-for-applications-by-adjusting-cpu-weight_using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications
-struct cpuacct_cpu_shares {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long shares;
-};
-
-struct cgroup_network_interface {
- const char *host_device;
- const char *container_device;
- struct cgroup_network_interface *next;
-};
-
-enum cgroups_container_orchestrator {
- CGROUPS_ORCHESTRATOR_UNSET,
- CGROUPS_ORCHESTRATOR_UNKNOWN,
- CGROUPS_ORCHESTRATOR_K8S
-};
-
-// *** WARNING *** The fields are not thread safe. Take care of safe usage.
-struct cgroup {
- uint32_t options;
-
- int first_time_seen; // first time seen by the discoverer
- int processed; // the discoverer is done processing a cgroup (resolved name, set 'enabled' option)
-
- char available; // found in the filesystem
- char enabled; // enabled in the config
-
- char pending_renames;
-
- char *id;
- uint32_t hash;
-
- char *intermediate_id; // TODO: remove it when the renaming script is fixed
-
- char *chart_id;
- uint32_t hash_chart_id;
-
- // 'cgroup_name' label value.
- // By default this is the *id (path), later changed to the resolved name (cgroup-name.sh) or systemd service name.
- char *name;
-
- RRDLABELS *chart_labels;
-
- int container_orchestrator;
-
- struct cpuacct_stat cpuacct_stat;
- struct cpuacct_usage cpuacct_usage;
- struct cpuacct_cpu_throttling cpuacct_cpu_throttling;
- struct cpuacct_cpu_shares cpuacct_cpu_shares;
-
- struct memory memory;
-
- struct blkio io_service_bytes; // bytes
- struct blkio io_serviced; // operations
-
- struct blkio throttle_io_service_bytes; // bytes
- struct blkio throttle_io_serviced; // operations
-
- struct blkio io_merged; // operations
- struct blkio io_queued; // operations
-
- struct cgroup_network_interface *interfaces;
-
- struct pressure cpu_pressure;
- struct pressure io_pressure;
- struct pressure memory_pressure;
- struct pressure irq_pressure;
-
- // per cgroup charts
- RRDSET *st_cpu;
- RRDSET *st_cpu_limit;
- RRDSET *st_cpu_per_core;
- RRDSET *st_cpu_nr_throttled;
- RRDSET *st_cpu_throttled_time;
- RRDSET *st_cpu_shares;
-
- RRDSET *st_mem;
- RRDSET *st_mem_utilization;
- RRDSET *st_writeback;
- RRDSET *st_mem_activity;
- RRDSET *st_pgfaults;
- RRDSET *st_mem_usage;
- RRDSET *st_mem_usage_limit;
- RRDSET *st_mem_failcnt;
-
- RRDSET *st_io;
- RRDSET *st_serviced_ops;
- RRDSET *st_throttle_io;
- RRDSET *st_throttle_serviced_ops;
- RRDSET *st_queued_ops;
- RRDSET *st_merged_ops;
-
- // per cgroup chart variables
- char *filename_cpuset_cpus;
- unsigned long long cpuset_cpus;
-
- char *filename_cpu_cfs_period;
- unsigned long long cpu_cfs_period;
-
- char *filename_cpu_cfs_quota;
- unsigned long long cpu_cfs_quota;
-
- const RRDSETVAR_ACQUIRED *chart_var_cpu_limit;
- NETDATA_DOUBLE prev_cpu_usage;
-
- char *filename_memory_limit;
- unsigned long long memory_limit;
- const RRDSETVAR_ACQUIRED *chart_var_memory_limit;
-
- char *filename_memoryswap_limit;
- unsigned long long memoryswap_limit;
- const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit;
-
- struct cgroup *next;
- struct cgroup *discovered_next;
-
-} *cgroup_root = NULL;
-
-uv_mutex_t cgroup_root_mutex;
-
-struct cgroup *discovered_cgroup_root = NULL;
-
-struct discovery_thread {
- uv_thread_t thread;
- uv_mutex_t mutex;
- uv_cond_t cond_var;
- int exited;
-} discovery_thread;
-
-// ---------------------------------------------------------------------------------------------
-
-static inline int matches_enabled_cgroup_paths(char *id) {
- return simple_pattern_matches(enabled_cgroup_paths, id);
-}
-
-static inline int matches_enabled_cgroup_names(char *name) {
- return simple_pattern_matches(enabled_cgroup_names, name);
-}
-
-static inline int matches_enabled_cgroup_renames(char *id) {
- return simple_pattern_matches(enabled_cgroup_renames, id);
-}
-
-static inline int matches_systemd_services_cgroups(char *id) {
- return simple_pattern_matches(systemd_services_cgroups, id);
-}
-
-static inline int matches_search_cgroup_paths(const char *dir) {
- return simple_pattern_matches(search_cgroup_paths, dir);
-}
-
-static inline int matches_entrypoint_parent_process_comm(const char *comm) {
- return simple_pattern_matches(entrypoint_parent_process_comm, comm);
-}
-
-static inline int is_cgroup_systemd_service(struct cgroup *cg) {
- return (cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE);
-}
-
-// ---------------------------------------------------------------------------------------------
-static int k8s_is_kubepod(struct cgroup *cg) {
- return cg->container_orchestrator == CGROUPS_ORCHESTRATOR_K8S;
-}
-
-static int k8s_is_container(const char *id) {
- // examples:
- // https://github.com/netdata/netdata/blob/0fc101679dcd12f1cb8acdd07bb4c85d8e553e53/collectors/cgroups.plugin/cgroup-name.sh#L121-L147
- const char *p = id;
- const char *pp = NULL;
- int i = 0;
- size_t l = 3; // pod
- while ((p = strstr(p, "pod"))) {
- i++;
- p += l;
- pp = p;
- }
- return !(i < 2 || !pp || !(pp = strchr(pp, '/')) || !pp++ || !*pp);
-}
-
-#define TASK_COMM_LEN 16
-
-static int k8s_get_container_first_proc_comm(const char *id, char *comm) {
- if (!k8s_is_container(id)) {
- return 1;
- }
-
- static procfile *ff = NULL;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s/cgroup.procs", cgroup_cpuacct_base, id);
-
- ff = procfile_reopen(ff, filename, NULL, CGROUP_PROCFILE_FLAG);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
- return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
- return 1;
- }
-
- unsigned long lines = procfile_lines(ff);
- if (likely(lines < 2)) {
- return 1;
- }
-
- char *pid = procfile_lineword(ff, 0, 0);
- if (!pid || !*pid) {
- return 1;
- }
-
- snprintfz(filename, FILENAME_MAX, "%s/proc/%s/comm", netdata_configured_host_prefix, pid);
-
- ff = procfile_reopen(ff, filename, NULL, PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
- return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
- return 1;
- }
-
- lines = procfile_lines(ff);
- if (unlikely(lines != 2)) {
- return 1;
- }
-
- char *proc_comm = procfile_lineword(ff, 0, 0);
- if (!proc_comm || !*proc_comm) {
- return 1;
- }
-
- strncpyz(comm, proc_comm, TASK_COMM_LEN);
- return 0;
-}
-
// ---------------------------------------------------------------------------------------------
static unsigned long long calc_delta(unsigned long long curr, unsigned long long prev) {
@@ -1023,16 +596,7 @@ static unsigned long long calc_percentage(unsigned long long value, unsigned lon
if (total == 0) {
return 0;
}
- return (NETDATA_DOUBLE)value / (NETDATA_DOUBLE)total * 100;
-}
-
-static int calc_cgroup_depth(const char *id) {
- int depth = 0;
- const char *s;
- for (s = id; *s; s++) {
- depth += unlikely(*s == '/');
- }
- return depth;
+ return (unsigned long long)((NETDATA_DOUBLE)value / (NETDATA_DOUBLE)total * 100);
}
// ----------------------------------------------------------------------------
@@ -1596,6 +1160,15 @@ memory_next:
}
}
+static void cgroup_read_pids_current(struct pids *pids) {
+ pids->pids_current_updated = 0;
+
+ if (unlikely(!pids->pids_current_filename))
+ return;
+
+ pids->pids_current_updated = !read_single_number_file(pids->pids_current_filename, &pids->pids_current);
+}
+
static inline void read_cgroup(struct cgroup *cg) {
netdata_log_debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id);
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
@@ -1610,6 +1183,7 @@ static inline void read_cgroup(struct cgroup *cg) {
cgroup_read_blkio(&cg->throttle_io_serviced);
cgroup_read_blkio(&cg->io_merged);
cgroup_read_blkio(&cg->io_queued);
+ cgroup_read_pids_current(&cg->pids);
}
else {
//TODO: io_service_bytes and io_serviced use same file merge into 1 function
@@ -1622,6 +1196,7 @@ static inline void read_cgroup(struct cgroup *cg) {
cgroup2_read_pressure(&cg->memory_pressure);
cgroup2_read_pressure(&cg->irq_pressure);
cgroup_read_memory(&cg->memory, 1);
+ cgroup_read_pids_current(&cg->pids);
}
}
@@ -1636,1583 +1211,7 @@ static inline void read_all_discovered_cgroups(struct cgroup *root) {
}
}
-// ----------------------------------------------------------------------------
-// cgroup network interfaces
-
-#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
-static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
-
- pid_t cgroup_pid;
- char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_cpuacct_base, cg->id);
- }
- else {
- snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_unified_base, cg->id);
- }
-
- netdata_log_debug(D_CGROUP, "executing cgroup_identifier %s --cgroup '%s' for cgroup '%s'", cgroups_network_interface_script, cgroup_identifier, cg->id);
- FILE *fp_child_input, *fp_child_output;
- (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_network_interface_script, "--cgroup", cgroup_identifier);
- if(!fp_child_output) {
- collector_error("CGROUP: cannot popen(%s --cgroup \"%s\", \"r\").", cgroups_network_interface_script, cgroup_identifier);
- return;
- }
-
- char *s;
- char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) {
- trim(s);
-
- if(*s && *s != '\n') {
- char *t = s;
- while(*t && *t != ' ') t++;
- if(*t == ' ') {
- *t = '\0';
- t++;
- }
-
- if(!*s) {
- collector_error("CGROUP: empty host interface returned by script");
- continue;
- }
-
- if(!*t) {
- collector_error("CGROUP: empty guest interface returned by script");
- continue;
- }
-
- struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
- i->host_device = strdupz(s);
- i->container_device = strdupz(t);
- i->next = cg->interfaces;
- cg->interfaces = i;
-
- collector_info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
-
- // register a device rename to proc_net_dev.c
- netdev_rename_device_add(
- i->host_device, i->container_device, cg->chart_id, cg->chart_labels, k8s_is_kubepod(cg) ? "k8s." : "");
- }
- }
-
- netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
- // netdata_log_debug(D_CGROUP, "closed cgroup_identifier for cgroup '%s'", cg->id);
-}
-
-static inline void free_cgroup_network_interfaces(struct cgroup *cg) {
- while(cg->interfaces) {
- struct cgroup_network_interface *i = cg->interfaces;
- cg->interfaces = i->next;
-
- // delete the registration of proc_net_dev rename
- netdev_rename_device_del(i->host_device);
-
- freez((void *)i->host_device);
- freez((void *)i->container_device);
- freez((void *)i);
- }
-}
-
-// ----------------------------------------------------------------------------
-// add/remove/find cgroup objects
-
-#define CGROUP_CHARTID_LINE_MAX 1024
-
-static inline char *cgroup_chart_id_strdupz(const char *s) {
- if(!s || !*s) s = "/";
-
- if(*s == '/' && s[1] != '\0') s++;
-
- char *r = strdupz(s);
- netdata_fix_chart_id(r);
-
- return r;
-}
-
-// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed
-static inline void substitute_dots_in_id(char *s) {
- // dots are used to distinguish chart type and id in streaming, so we should replace them
- for (char *d = s; *d; d++) {
- if (*d == '.')
- *d = '-';
- }
-}
-
-// ----------------------------------------------------------------------------
-// parse k8s labels
-
-char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data) {
- // the first word, up to the first space is the name
- char *name = strsep_skip_consecutive_separators(&data, " ");
-
- // the rest are key=value pairs separated by comma
- while(data) {
- char *pair = strsep_skip_consecutive_separators(&data, ",");
- rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S);
- }
-
- return name;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void free_pressure(struct pressure *res) {
- if (res->some.share_time.st) rrdset_is_obsolete(res->some.share_time.st);
- if (res->some.total_time.st) rrdset_is_obsolete(res->some.total_time.st);
- if (res->full.share_time.st) rrdset_is_obsolete(res->full.share_time.st);
- if (res->full.total_time.st) rrdset_is_obsolete(res->full.total_time.st);
- freez(res->filename);
-}
-
-static inline void cgroup_free(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
-
- if(cg->st_cpu) rrdset_is_obsolete(cg->st_cpu);
- if(cg->st_cpu_limit) rrdset_is_obsolete(cg->st_cpu_limit);
- if(cg->st_cpu_per_core) rrdset_is_obsolete(cg->st_cpu_per_core);
- if(cg->st_cpu_nr_throttled) rrdset_is_obsolete(cg->st_cpu_nr_throttled);
- if(cg->st_cpu_throttled_time) rrdset_is_obsolete(cg->st_cpu_throttled_time);
- if(cg->st_cpu_shares) rrdset_is_obsolete(cg->st_cpu_shares);
- if(cg->st_mem) rrdset_is_obsolete(cg->st_mem);
- if(cg->st_writeback) rrdset_is_obsolete(cg->st_writeback);
- if(cg->st_mem_activity) rrdset_is_obsolete(cg->st_mem_activity);
- if(cg->st_pgfaults) rrdset_is_obsolete(cg->st_pgfaults);
- if(cg->st_mem_usage) rrdset_is_obsolete(cg->st_mem_usage);
- if(cg->st_mem_usage_limit) rrdset_is_obsolete(cg->st_mem_usage_limit);
- if(cg->st_mem_utilization) rrdset_is_obsolete(cg->st_mem_utilization);
- if(cg->st_mem_failcnt) rrdset_is_obsolete(cg->st_mem_failcnt);
- if(cg->st_io) rrdset_is_obsolete(cg->st_io);
- if(cg->st_serviced_ops) rrdset_is_obsolete(cg->st_serviced_ops);
- if(cg->st_throttle_io) rrdset_is_obsolete(cg->st_throttle_io);
- if(cg->st_throttle_serviced_ops) rrdset_is_obsolete(cg->st_throttle_serviced_ops);
- if(cg->st_queued_ops) rrdset_is_obsolete(cg->st_queued_ops);
- if(cg->st_merged_ops) rrdset_is_obsolete(cg->st_merged_ops);
-
- freez(cg->filename_cpuset_cpus);
- freez(cg->filename_cpu_cfs_period);
- freez(cg->filename_cpu_cfs_quota);
- freez(cg->filename_memory_limit);
- freez(cg->filename_memoryswap_limit);
-
- free_cgroup_network_interfaces(cg);
-
- freez(cg->cpuacct_usage.cpu_percpu);
-
- freez(cg->cpuacct_stat.filename);
- freez(cg->cpuacct_usage.filename);
- freez(cg->cpuacct_cpu_throttling.filename);
- freez(cg->cpuacct_cpu_shares.filename);
-
- arl_free(cg->memory.arl_base);
- freez(cg->memory.filename_detailed);
- freez(cg->memory.filename_failcnt);
- freez(cg->memory.filename_usage_in_bytes);
- freez(cg->memory.filename_msw_usage_in_bytes);
-
- freez(cg->io_service_bytes.filename);
- freez(cg->io_serviced.filename);
-
- freez(cg->throttle_io_service_bytes.filename);
- freez(cg->throttle_io_serviced.filename);
-
- freez(cg->io_merged.filename);
- freez(cg->io_queued.filename);
-
- free_pressure(&cg->cpu_pressure);
- free_pressure(&cg->io_pressure);
- free_pressure(&cg->memory_pressure);
- free_pressure(&cg->irq_pressure);
-
- freez(cg->id);
- freez(cg->intermediate_id);
- freez(cg->chart_id);
- freez(cg->name);
-
- rrdlabels_destroy(cg->chart_labels);
-
- freez(cg);
-
- cgroup_root_count--;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void discovery_rename_cgroup(struct cgroup *cg) {
- if (!cg->pending_renames) {
- return;
- }
- cg->pending_renames--;
-
- netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
- netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id);
- pid_t cgroup_pid;
-
- FILE *fp_child_input, *fp_child_output;
- (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_rename_script, cg->id, cg->intermediate_id);
- if (!fp_child_output) {
- collector_error("CGROUP: cannot popen(%s \"%s\", \"r\").", cgroups_rename_script, cg->intermediate_id);
- cg->pending_renames = 0;
- cg->processed = 1;
- return;
- }
-
- char buffer[CGROUP_CHARTID_LINE_MAX + 1];
- char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp_child_output);
- int exit_code = netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
-
- switch (exit_code) {
- case 0:
- cg->pending_renames = 0;
- break;
-
- case 3:
- cg->pending_renames = 0;
- cg->processed = 1;
- break;
- }
-
- if (cg->pending_renames || cg->processed)
- return;
- if (!new_name || !*new_name || *new_name == '\n')
- return;
- if (!(new_name = trim(new_name)))
- return;
-
- char *name = new_name;
-
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
- // read the new labels and remove the obsolete ones
- rrdlabels_unmark_all(cg->chart_labels);
- name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name);
- rrdlabels_remove_all_unmarked(cg->chart_labels);
-
- freez(cg->name);
- cg->name = strdupz(name);
-
- freez(cg->chart_id);
- cg->chart_id = cgroup_chart_id_strdupz(name);
-
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-}
-
-static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) {
- struct stat buf;
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- out->path[0] = '\0';
- out->enabled = 0;
-}
-
-static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) {
- char buffer[CGROUP_CHARTID_LINE_MAX + 1];
- cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
- strncpyz(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
- char *s = buffer;
-
- // skip to the last slash
- size_t len = strlen(s);
- while (len--) {
- if (unlikely(s[len] == '/')) {
- break;
- }
- }
- if (len) {
- s = &s[len + 1];
- }
-
- // remove extension
- len = strlen(s);
- while (len--) {
- if (unlikely(s[len] == '.')) {
- break;
- }
- }
- if (len) {
- s[len] = '\0';
- }
-
- freez(cg->name);
- cg->name = strdupz(s);
-
- freez(cg->chart_id);
- cg->chart_id = cgroup_chart_id_strdupz(s);
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-}
-
-static inline struct cgroup *discovery_cgroup_add(const char *id) {
- netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
-
- struct cgroup *cg = callocz(1, sizeof(struct cgroup));
-
- cg->id = strdupz(id);
- cg->hash = simple_hash(cg->id);
-
- cg->name = strdupz(id);
-
- cg->intermediate_id = cgroup_chart_id_strdupz(id);
-
- cg->chart_id = cgroup_chart_id_strdupz(id);
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-
- if (cgroup_use_unified_cgroups) {
- cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
- }
-
- if (!discovered_cgroup_root)
- discovered_cgroup_root = cg;
- else {
- struct cgroup *t;
- for (t = discovered_cgroup_root; t->discovered_next; t = t->discovered_next) {
- }
- t->discovered_next = cg;
- }
-
- return cg;
-}
-
-static inline struct cgroup *discovery_cgroup_find(const char *id) {
- netdata_log_debug(D_CGROUP, "searching for cgroup '%s'", id);
-
- uint32_t hash = simple_hash(id);
-
- struct cgroup *cg;
- for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
- if(hash == cg->hash && strcmp(id, cg->id) == 0)
- break;
- }
-
- netdata_log_debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
- return cg;
-}
-
-static inline void discovery_find_cgroup_in_dir_callback(const char *dir) {
- if (!dir || !*dir) {
- dir = "/";
- }
- netdata_log_debug(D_CGROUP, "examining cgroup dir '%s'", dir);
-
- struct cgroup *cg = discovery_cgroup_find(dir);
- if (cg) {
- cg->available = 1;
- return;
- }
-
- if (cgroup_root_count >= cgroup_root_max) {
- collector_info("CGROUP: maximum number of cgroups reached (%d). Not adding cgroup '%s'", cgroup_root_count, dir);
- return;
- }
-
- if (cgroup_max_depth > 0) {
- int depth = calc_cgroup_depth(dir);
- if (depth > cgroup_max_depth) {
- collector_info("CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
- return;
- }
- }
-
- cg = discovery_cgroup_add(dir);
- cg->available = 1;
- cg->first_time_seen = 1;
- cgroup_root_count++;
-}
-
-static inline int discovery_find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
- if(!this) this = base;
- netdata_log_debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base);
-
- size_t dirlen = strlen(this), baselen = strlen(base);
-
- int ret = -1;
- int enabled = -1;
-
- const char *relative_path = &this[baselen];
- if(!*relative_path) relative_path = "/";
-
- DIR *dir = opendir(this);
- if(!dir) {
- collector_error("CGROUP: cannot read directory '%s'", base);
- return ret;
- }
- ret = 1;
-
- callback(relative_path);
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- ))
- continue;
-
- if(de->d_type == DT_DIR) {
- if(enabled == -1) {
- const char *r = relative_path;
- if(*r == '\0') r = "/";
-
- // do not decent in directories we are not interested
- enabled = matches_search_cgroup_paths(r);
- }
-
- if(enabled) {
- char *s = mallocz(dirlen + strlen(de->d_name) + 2);
- strcpy(s, this);
- strcat(s, "/");
- strcat(s, de->d_name);
- int ret2 = discovery_find_dir_in_subdirs(base, s, callback);
- if(ret2 > 0) ret += ret2;
- freez(s);
- }
- }
- }
-
- closedir(dir);
- return ret;
-}
-
-static inline void discovery_mark_all_cgroups_as_unavailable() {
- netdata_log_debug(D_CGROUP, "marking all cgroups as not available");
- struct cgroup *cg;
- for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- cg->available = 0;
- }
-}
-
-static inline void discovery_update_filenames() {
- struct cgroup *cg;
- struct stat buf;
- for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
- if(unlikely(!cg->available || !cg->enabled || cg->pending_renames))
- continue;
-
- netdata_log_debug(D_CGROUP, "checking paths for cgroup '%s'", cg->id);
-
- // check for newly added cgroups
- // and update the filenames they read
- char filename[FILENAME_MAX + 1];
- if(!cgroup_use_unified_cgroups) {
- if(unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_stat.filename = strdupz(filename);
- cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id);
- cg->filename_cpuset_cpus = strdupz(filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id);
- cg->filename_cpu_cfs_period = strdupz(filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id);
- cg->filename_cpu_cfs_quota = strdupz(filename);
- netdata_log_debug(D_CGROUP, "cpuacct.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
- }
- else
- netdata_log_debug(D_CGROUP, "cpuacct.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_usage.filename = strdupz(filename);
- cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
- netdata_log_debug(D_CGROUP, "cpuacct.usage_percpu filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_usage.filename);
- }
- else
- netdata_log_debug(D_CGROUP, "cpuacct.usage_percpu file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- if(unlikely(cgroup_enable_cpuacct_cpu_throttling && !cg->cpuacct_cpu_throttling.filename && !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_cpuacct_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_throttling.filename = strdupz(filename);
- cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
- netdata_log_debug(D_CGROUP, "cpu.stat filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_throttling.filename);
- }
- else
- netdata_log_debug(D_CGROUP, "cpu.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- if (unlikely(
- cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename &&
- !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.shares", cgroup_cpuacct_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_shares.filename = strdupz(filename);
- cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
- netdata_log_debug(
- D_CGROUP, "cpu.shares filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_shares.filename);
- } else
- netdata_log_debug(D_CGROUP, "cpu.shares file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_detailed = strdupz(filename);
- cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
- netdata_log_debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
- netdata_log_debug(D_CGROUP, "memory.usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id);
- cg->filename_memory_limit = strdupz(filename);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id);
- cg->filename_memoryswap_limit = strdupz(filename);
- netdata_log_debug(D_CGROUP, "memory.msw_usage_in_bytes filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.msw_usage_in_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_failcnt = strdupz(filename);
- cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
- netdata_log_debug(D_CGROUP, "memory.failcnt filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_failcnt);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.failcnt file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- netdata_log_debug(D_CGROUP, "blkio.io_service_bytes_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_service_bytes_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- netdata_log_debug(D_CGROUP, "blkio.io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
-
- if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_serviced_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_serviced_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_serviced filename for cgroup '%s': '%s'", cg->id, cg->io_serviced.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
-
- if (unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->throttle_io_service_bytes.filename = strdupz(filename);
- cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
- netdata_log_debug(D_CGROUP,"blkio.throttle.io_service_bytes_recursive filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(
- filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->throttle_io_service_bytes.filename = strdupz(filename);
- cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_service_bytes.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_service_bytes file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
-
- if (unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->throttle_io_serviced.filename = strdupz(filename);
- cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced_recursive filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->throttle_io_serviced.filename = strdupz(filename);
- cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced filename for cgroup '%s': '%s'", cg->id, cg->throttle_io_serviced.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.throttle.io_serviced file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
-
- if (unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_merged.filename = strdupz(filename);
- cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_merged_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_merged_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_merged.filename = strdupz(filename);
- cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_merged filename for cgroup '%s': '%s'", cg->id, cg->io_merged.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_merged file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
-
- if (unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_queued.filename = strdupz(filename);
- cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_queued_recursive filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_queued_recursive file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_queued.filename = strdupz(filename);
- cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
- netdata_log_debug(D_CGROUP, "blkio.io_queued filename for cgroup '%s': '%s'", cg->id, cg->io_queued.filename);
- } else {
- netdata_log_debug(D_CGROUP, "blkio.io_queued file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- }
- }
- }
- else if(likely(cgroup_unified_exist)) {
- if(unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- netdata_log_debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
- } else
- netdata_log_debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- netdata_log_debug(D_CGROUP, "io.stat filename for unified cgroup '%s': '%s'", cg->id, cg->io_service_bytes.filename);
- } else
- netdata_log_debug(D_CGROUP, "io.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- if (unlikely(
- (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_cpu_throttling) &&
- !cg->cpuacct_stat.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_stat.filename = strdupz(filename);
- cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
- cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
- cg->filename_cpuset_cpus = NULL;
- cg->filename_cpu_cfs_period = NULL;
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id);
- cg->filename_cpu_cfs_quota = strdupz(filename);
- netdata_log_debug(D_CGROUP, "cpu.stat filename for unified cgroup '%s': '%s'", cg->id, cg->cpuacct_stat.filename);
- }
- else
- netdata_log_debug(D_CGROUP, "cpu.stat file for unified cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
- if (unlikely(cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.weight", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_shares.filename = strdupz(filename);
- cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
- netdata_log_debug(D_CGROUP, "cpu.weight filename for cgroup '%s': '%s'", cg->id, cg->cpuacct_cpu_shares.filename);
- } else
- netdata_log_debug(D_CGROUP, "cpu.weight file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely((cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed && (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_detailed = strdupz(filename);
- cg->memory.enabled_detailed = (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_AUTO;
- netdata_log_debug(D_CGROUP, "memory.stat filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_detailed);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.stat file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
- netdata_log_debug(D_CGROUP, "memory.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_usage_in_bytes);
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id);
- cg->filename_memory_limit = strdupz(filename);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.current file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if(unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id);
- if(likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id);
- cg->filename_memoryswap_limit = strdupz(filename);
- netdata_log_debug(D_CGROUP, "memory.swap.current filename for cgroup '%s': '%s'", cg->id, cg->memory.filename_msw_usage_in_bytes);
- }
- else
- netdata_log_debug(D_CGROUP, "memory.swap file for cgroup '%s': '%s' does not exist.", cg->id, filename);
- }
-
- if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpu_pressure.filename = strdupz(filename);
- cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu;
- cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO;
- netdata_log_debug(D_CGROUP, "cpu.pressure filename for cgroup '%s': '%s'", cg->id, cg->cpu_pressure.filename);
- } else {
- netdata_log_debug(D_CGROUP, "cpu.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
- }
- }
-
- if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_pressure.filename = strdupz(filename);
- cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some;
- cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full;
- netdata_log_debug(D_CGROUP, "io.pressure filename for cgroup '%s': '%s'", cg->id, cg->io_pressure.filename);
- } else {
- netdata_log_debug(D_CGROUP, "io.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
- }
- }
-
- if (unlikely((cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) && !cg->memory_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory_pressure.filename = strdupz(filename);
- cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some;
- cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full;
- netdata_log_debug(D_CGROUP, "memory.pressure filename for cgroup '%s': '%s'", cg->id, cg->memory_pressure.filename);
- } else {
- netdata_log_debug(D_CGROUP, "memory.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
- }
- }
-
- if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->irq_pressure.filename = strdupz(filename);
- cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some;
- cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full;
- netdata_log_debug(D_CGROUP, "irq.pressure filename for cgroup '%s': '%s'", cg->id, cg->irq_pressure.filename);
- } else {
- netdata_log_debug(D_CGROUP, "irq.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
- }
- }
- }
- }
-}
-
-static inline void discovery_cleanup_all_cgroups() {
- struct cgroup *cg = discovered_cgroup_root, *last = NULL;
-
- for(; cg ;) {
- if(!cg->available) {
- // enable the first duplicate cgroup
- {
- struct cgroup *t;
- for (t = discovered_cgroup_root; t; t = t->discovered_next) {
- if (t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE &&
- (is_cgroup_systemd_service(t) == is_cgroup_systemd_service(cg)) &&
- t->hash_chart_id == cg->hash_chart_id && !strcmp(t->chart_id, cg->chart_id)) {
- netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
- t->enabled = 1;
- t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
- break;
- }
- }
- }
-
- if(!last)
- discovered_cgroup_root = cg->discovered_next;
- else
- last->discovered_next = cg->discovered_next;
-
- cgroup_free(cg);
-
- if(!last)
- cg = discovered_cgroup_root;
- else
- cg = last->discovered_next;
- }
- else {
- last = cg;
- cg = cg->discovered_next;
- }
- }
-}
-
-static inline void discovery_copy_discovered_cgroups_to_reader() {
- netdata_log_debug(D_CGROUP, "copy discovered cgroups to the main group list");
-
- struct cgroup *cg;
-
- for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- cg->next = cg->discovered_next;
- }
-
- cgroup_root = discovered_cgroup_root;
-}
-
-static inline void discovery_share_cgroups_with_ebpf() {
- struct cgroup *cg;
- int count;
- struct stat buf;
-
- if (shm_mutex_cgroup_ebpf == SEM_FAILED) {
- return;
- }
- sem_wait(shm_mutex_cgroup_ebpf);
-
- for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) {
- netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count];
- char *prefix = (is_cgroup_systemd_service(cg)) ? services_chart_id_prefix : cgroup_chart_id_prefix;
- snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_id);
- ptr->hash = simple_hash(ptr->name);
- ptr->options = cg->options;
- ptr->enabled = cg->enabled;
- if (cgroup_use_unified_cgroups) {
- snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id);
- if (likely(stat(ptr->path, &buf) == -1)) {
- ptr->path[0] = '\0';
- ptr->enabled = 0;
- }
- } else {
- is_cgroup_procs_exist(ptr, cg->id);
- }
-
- netdata_log_debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled);
- }
-
- shm_cgroup_ebpf.header->cgroup_root_count = count;
- sem_post(shm_mutex_cgroup_ebpf);
-}
-
-static inline void discovery_find_all_cgroups_v1() {
- if (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
- if (discovery_find_dir_in_subdirs(cgroup_cpuacct_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_cpuacct_stat = cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled cpu statistics.");
- }
- }
-
- if (cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io ||
- cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
- if (discovery_find_dir_in_subdirs(cgroup_blkio_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_blkio_io = cgroup_enable_blkio_ops = cgroup_enable_blkio_throttle_io =
- cgroup_enable_blkio_throttle_ops = cgroup_enable_blkio_merged_ops = cgroup_enable_blkio_queued_ops =
- CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled blkio statistics.");
- }
- }
-
- if (cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
- if (discovery_find_dir_in_subdirs(cgroup_memory_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_memory = cgroup_enable_detailed_memory = cgroup_enable_swap = cgroup_enable_memory_failcnt =
- CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled memory statistics.");
- }
- }
-
- if (cgroup_search_in_devices) {
- if (discovery_find_dir_in_subdirs(cgroup_devices_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_search_in_devices = 0;
- collector_error("CGROUP: disabled devices statistics.");
- }
- }
-}
-
-static inline void discovery_find_all_cgroups_v2() {
- if (discovery_find_dir_in_subdirs(cgroup_unified_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_unified_exist = CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled unified cgroups statistics.");
- }
-}
-
-static int is_digits_only(const char *s) {
- do {
- if (!isdigit(*s++)) {
- return 0;
- }
- } while (*s);
-
- return 1;
-}
-
-static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) {
- if (!cg->first_time_seen) {
- return;
- }
- cg->first_time_seen = 0;
-
- char comm[TASK_COMM_LEN + 1];
-
- if (cg->container_orchestrator == CGROUPS_ORCHESTRATOR_UNSET) {
- if (strstr(cg->id, "kubepods")) {
- cg->container_orchestrator = CGROUPS_ORCHESTRATOR_K8S;
- } else {
- cg->container_orchestrator = CGROUPS_ORCHESTRATOR_UNKNOWN;
- }
- }
-
- if (is_inside_k8s && !k8s_get_container_first_proc_comm(cg->id, comm)) {
- // container initialization may take some time when CPU % is high
- // seen on GKE: comm is '6' before 'runc:[2:INIT]' (dunno if it could be another number)
- if (is_digits_only(comm) || matches_entrypoint_parent_process_comm(comm)) {
- cg->first_time_seen = 1;
- return;
- }
- if (!strcmp(comm, "pause")) {
- // a container that holds the network namespace for the pod
- // we don't need to collect its metrics
- cg->processed = 1;
- return;
- }
- }
-
- if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_id);
- convert_cgroup_to_systemd_service(cg);
- return;
- }
-
- if (matches_enabled_cgroup_renames(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_id);
- if (is_inside_k8s && k8s_is_container(cg->id)) {
- // it may take up to a minute for the K8s API to return data for the container
- // tested on AWS K8s cluster with 100% CPU utilization
- cg->pending_renames = 9; // 1.5 minute
- } else {
- cg->pending_renames = 2;
- }
- }
-}
-
-static int discovery_is_cgroup_duplicate(struct cgroup *cg) {
- // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
- struct cgroup *c;
- for (c = discovered_cgroup_root; c; c = c->discovered_next) {
- if (c != cg && c->enabled && (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg)) &&
- c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id)) {
- collector_error(
- "CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
- cg->chart_id,
- c->id,
- cg->id);
- return 1;
- }
- }
- return 0;
-}
-
-static inline void discovery_process_cgroup(struct cgroup *cg) {
- if (!cg) {
- netdata_log_debug(D_CGROUP, "discovery_process_cgroup() received NULL");
- return;
- }
- if (!cg->available || cg->processed) {
- return;
- }
-
- if (cg->first_time_seen) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS_FIRST_TIME);
- discovery_process_first_time_seen_cgroup(cg);
- if (unlikely(cg->first_time_seen || cg->processed)) {
- return;
- }
- }
-
- if (cg->pending_renames) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS_RENAME);
- discovery_rename_cgroup(cg);
- if (unlikely(cg->pending_renames || cg->processed)) {
- return;
- }
- }
-
- cg->processed = 1;
-
- if ((strlen(cg->chart_id) + strlen(cgroup_chart_id_prefix)) >= RRD_ID_LENGTH_MAX) {
- collector_info("cgroup '%s' (chart id '%s') disabled because chart_id exceeds the limit (RRD_ID_LENGTH_MAX)", cg->id, cg->chart_id);
- return;
- }
-
- if (is_cgroup_systemd_service(cg)) {
- if (discovery_is_cgroup_duplicate(cg)) {
- cg->enabled = 0;
- cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- return;
- }
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
- rrdlabels_add(cg->chart_labels, "service_name", cg->name, RRDLABEL_SRC_AUTO);
- cg->enabled = 1;
- return;
- }
-
- if (!(cg->enabled = matches_enabled_cgroup_names(cg->name))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->name);
- return;
- }
-
- if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->name);
- return;
- }
-
- if (discovery_is_cgroup_duplicate(cg)) {
- cg->enabled = 0;
- cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- return;
- }
-
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
-
- if (!k8s_is_kubepod(cg)) {
- rrdlabels_add(cg->chart_labels, "cgroup_name", cg->name, RRDLABEL_SRC_AUTO);
- if (!rrdlabels_exist(cg->chart_labels, "image"))
- rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO);
- }
-
- worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK);
- read_cgroup_network_interfaces(cg);
-}
-
-static inline void discovery_find_all_cgroups() {
- netdata_log_debug(D_CGROUP, "searching for cgroups");
-
- worker_is_busy(WORKER_DISCOVERY_INIT);
- discovery_mark_all_cgroups_as_unavailable();
-
- worker_is_busy(WORKER_DISCOVERY_FIND);
- if (!cgroup_use_unified_cgroups) {
- discovery_find_all_cgroups_v1();
- } else {
- discovery_find_all_cgroups_v2();
- }
-
- struct cgroup *cg;
- for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS);
- discovery_process_cgroup(cg);
- }
-
- worker_is_busy(WORKER_DISCOVERY_UPDATE);
- discovery_update_filenames();
-
- worker_is_busy(WORKER_DISCOVERY_LOCK);
- uv_mutex_lock(&cgroup_root_mutex);
-
- worker_is_busy(WORKER_DISCOVERY_CLEANUP);
- discovery_cleanup_all_cgroups();
-
- worker_is_busy(WORKER_DISCOVERY_COPY);
- discovery_copy_discovered_cgroups_to_reader();
-
- uv_mutex_unlock(&cgroup_root_mutex);
-
- worker_is_busy(WORKER_DISCOVERY_SHARE);
- discovery_share_cgroups_with_ebpf();
-
- netdata_log_debug(D_CGROUP, "done searching for cgroups");
-}
-
-static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
- if(buffer[0]) return buffer;
-
- if (cg->chart_id[0] == '\0' || (cg->chart_id[0] == '/' && cg->chart_id[1] == '\0'))
- strncpy(buffer, "cgroup_root", RRD_ID_LENGTH_MAX);
- else if (is_cgroup_systemd_service(cg))
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id);
- else
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", cgroup_chart_id_prefix, cg->chart_id);
-
- return buffer;
-}
-
-void cgroup_discovery_worker(void *ptr)
-{
- UNUSED(ptr);
-
- worker_register("CGROUPSDISC");
- worker_register_job_name(WORKER_DISCOVERY_INIT, "init");
- worker_register_job_name(WORKER_DISCOVERY_FIND, "find");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS, "process");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_RENAME, "rename");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_NETWORK, "network");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_FIRST_TIME, "new");
- worker_register_job_name(WORKER_DISCOVERY_UPDATE, "update");
- worker_register_job_name(WORKER_DISCOVERY_CLEANUP, "cleanup");
- worker_register_job_name(WORKER_DISCOVERY_COPY, "copy");
- worker_register_job_name(WORKER_DISCOVERY_SHARE, "share");
- worker_register_job_name(WORKER_DISCOVERY_LOCK, "lock");
-
- entrypoint_parent_process_comm = simple_pattern_create(
- " runc:[* " // http://terenceli.github.io/%E6%8A%80%E6%9C%AF/2021/12/28/runc-internals-3)
- " exe ", // https://github.com/falcosecurity/falco/blob/9d41b0a151b83693929d3a9c84f7c5c85d070d3a/rules/falco_rules.yaml#L1961
- NULL,
- SIMPLE_PATTERN_EXACT, true);
-
- service_register(SERVICE_THREAD_TYPE_LIBUV, NULL, NULL, NULL, false);
-
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
-
- uv_mutex_lock(&discovery_thread.mutex);
- uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex);
- uv_mutex_unlock(&discovery_thread.mutex);
-
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- discovery_find_all_cgroups();
- }
- collector_info("discovery thread stopped");
- worker_unregister();
- service_exits();
- __atomic_store_n(&discovery_thread.exited,1,__ATOMIC_RELAXED);
-}
-
-// ----------------------------------------------------------------------------
-// generate charts
-
-#define CHART_TITLE_MAX 300
-
-void update_systemd_services_charts(
- int update_every,
- int do_cpu,
- int do_mem_usage,
- int do_mem_detailed,
- int do_mem_failcnt,
- int do_swap_usage,
- int do_io,
- int do_io_ops,
- int do_throttle_io,
- int do_throttle_ops,
- int do_queued_ops,
- int do_merged_ops)
-{
- // update the values
- struct cgroup *cg;
- int systemd_cgroup_chart_priority = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD;
- char type[RRD_ID_LENGTH_MAX + 1];
-
- for (cg = cgroup_root; cg; cg = cg->next) {
- if (unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg)))
- continue;
-
- type[0] = '\0';
- if (likely(do_cpu && cg->cpuacct_stat.updated)) {
- if (unlikely(!cg->st_cpu)) {
- cg->st_cpu = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "cpu_utilization",
- NULL,
- "cpu",
- "systemd.service.cpu.utilization",
- "Systemd Services CPU utilization (100%% = 1 core)",
- "percentage",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority,
- update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(cg->st_cpu, cg->chart_labels);
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- } else {
- rrddim_add(cg->st_cpu, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_cpu, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- }
- }
-
- // complete the iteration
- rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
- rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
- rrdset_done(cg->st_cpu);
- }
-
- if (unlikely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
- if (unlikely(!cg->st_mem_usage)) {
- cg->st_mem_usage = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_usage",
- NULL,
- "mem",
- "systemd.service.memory.usage",
- "Systemd Services Used Memory",
- "MiB",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 5,
- update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(cg->st_mem_usage, cg->chart_labels);
- rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- if (likely(do_swap_usage))
- rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes);
- if (likely(do_swap_usage)) {
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set(
- cg->st_mem_usage,
- "swap",
- cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
- cg->memory.msw_usage_in_bytes -
- (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) :
- 0);
- } else {
- rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes);
- }
- }
- rrdset_done(cg->st_mem_usage);
- }
-
- if (likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
- if (unlikely(do_mem_failcnt && !cg->st_mem_failcnt)) {
- cg->st_mem_failcnt = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_failcnt",
- NULL,
- "mem",
- "systemd.service.memory.failcnt",
- "Systemd Services Memory Limit Failures",
- "failures/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 10,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(cg->st_mem_failcnt, cg->chart_labels);
- rrddim_add(cg->st_mem_failcnt, "fail", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_mem_failcnt, "fail", cg->memory.failcnt);
- rrdset_done(cg->st_mem_failcnt);
- }
-
- if (likely(do_mem_detailed && cg->memory.updated_detailed)) {
- if (unlikely(!cg->st_mem)) {
- cg->st_mem = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_ram_usage",
- NULL,
- "mem",
- "systemd.service.memory.ram.usage",
- "Systemd Services Memory",
- "MiB",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 15,
- update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(cg->st_mem, cg->chart_labels);
- rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(cg->st_mem, "rss", cg->memory.total_rss);
- rrddim_set(cg->st_mem, "cache", cg->memory.total_cache);
- rrddim_set(cg->st_mem, "mapped_file", cg->memory.total_mapped_file);
- rrddim_set(cg->st_mem, "rss_huge", cg->memory.total_rss_huge);
- rrdset_done(cg->st_mem);
-
- if (unlikely(!cg->st_writeback)) {
- cg->st_writeback = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_writeback",
- NULL,
- "mem",
- "systemd.service.memory.writeback",
- "Systemd Services Writeback Memory",
- "MiB",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 20,
- update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(cg->st_writeback, cg->chart_labels);
- rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(cg->st_writeback, "writeback", cg->memory.total_writeback);
- rrddim_set(cg->st_writeback, "dirty", cg->memory.total_dirty);
- rrdset_done(cg->st_writeback);
-
- if (unlikely(!cg->st_pgfaults)) {
- cg->st_pgfaults = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_pgfault",
- NULL,
- "mem",
- "systemd.service.memory.paging.faults",
- "Systemd Services Memory Minor and Major Page Faults",
- "MiB/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 25,
- update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(cg->st_pgfaults, cg->chart_labels);
- rrddim_add(cg->st_pgfaults, "minor", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_pgfaults, "major", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_pgfaults, "minor", cg->memory.total_pgfault);
- rrddim_set(cg->st_pgfaults, "major", cg->memory.total_pgmajfault);
- rrdset_done(cg->st_pgfaults);
-
- if (unlikely(!cg->st_mem_activity)) {
- cg->st_mem_activity = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "mem_paging_io",
- NULL,
- "mem",
- "systemd.service.memory.paging.io",
- "Systemd Services Memory Paging IO",
- "MiB/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 30,
- update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(cg->st_mem_activity, cg->chart_labels);
- rrddim_add(cg->st_mem_activity, "in", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_mem_activity, "out", NULL, -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_mem_activity, "in", cg->memory.total_pgpgin);
- rrddim_set(cg->st_mem_activity, "out", cg->memory.total_pgpgout);
- rrdset_done(cg->st_mem_activity);
- }
-
- if (likely(do_io && cg->io_service_bytes.updated)) {
- if (unlikely(!cg->st_io)) {
- cg->st_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_io",
- NULL,
- "disk",
- "systemd.service.disk.io",
- "Systemd Services Disk Read/Write Bandwidth",
- "KiB/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 35,
- update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(cg->st_io, cg->chart_labels);
- rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
- rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
- rrdset_done(cg->st_io);
- }
-
- if (likely(do_io_ops && cg->io_serviced.updated)) {
- if (unlikely(!cg->st_serviced_ops)) {
- cg->st_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_iops",
- NULL,
- "disk",
- "systemd.service.disk.iops",
- "Systemd Services Disk Read/Write Operations",
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 40,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(cg->st_serviced_ops, cg->chart_labels);
- rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
- rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
- rrdset_done(cg->st_serviced_ops);
- }
-
- if (likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
- if (unlikely(!cg->st_throttle_io)) {
- cg->st_throttle_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_throttle_io",
- NULL,
- "disk",
- "systemd.service.disk.throttle.io",
- "Systemd Services Throttle Disk Read/Write Bandwidth",
- "KiB/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 45,
- update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(cg->st_throttle_io, cg->chart_labels);
- rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
- rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
- rrdset_done(cg->st_throttle_io);
- }
-
- if (likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
- if (unlikely(!cg->st_throttle_serviced_ops)) {
- cg->st_throttle_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_throttle_iops",
- NULL,
- "disk",
- "systemd.service.disk.throttle.iops",
- "Systemd Services Throttle Disk Read/Write Operations",
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 50,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(cg->st_throttle_serviced_ops, cg->chart_labels);
- rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
- rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
- rrdset_done(cg->st_throttle_serviced_ops);
- }
-
- if (likely(do_queued_ops && cg->io_queued.updated)) {
- if (unlikely(!cg->st_queued_ops)) {
- cg->st_queued_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_queued_iops",
- NULL,
- "disk",
- "systemd.service.disk.queued_iops",
- "Systemd Services Queued Disk Read/Write Operations",
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 55,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(cg->st_queued_ops, cg->chart_labels);
- rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
- rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
- rrdset_done(cg->st_queued_ops);
- }
-
- if (likely(do_merged_ops && cg->io_merged.updated)) {
- if (unlikely(!cg->st_merged_ops)) {
- cg->st_merged_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg),
- "disk_merged_iops",
- NULL,
- "disk",
- "systemd.service.disk.merged_iops",
- "Systemd Services Merged Disk Read/Write Operations",
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
- systemd_cgroup_chart_priority + 60,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(cg->st_merged_ops, cg->chart_labels);
- rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
- rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
- rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
- rrdset_done(cg->st_merged_ops);
- }
- }
-}
+// update CPU and memory limits
static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) {
if(*filename) {
@@ -3225,10 +1224,7 @@ static inline void update_cpu_limits(char **filename, unsigned long long *value,
ret = 0;
}
}
- else if(value == &cg->cpu_cfs_period) {
- ret = read_single_number_file(*filename, value);
- }
- else if(value == &cg->cpu_cfs_quota) {
+ else if(value == &cg->cpu_cfs_period || value == &cg->cpu_cfs_quota) {
ret = read_single_number_file(*filename, value);
}
else ret = -1;
@@ -3282,12 +1278,16 @@ cpu_limits2_err:
}
}
-static inline int update_memory_limits(char **filename, const RRDSETVAR_ACQUIRED **chart_var, unsigned long long *value, const char *chart_var_name, struct cgroup *cg) {
+static inline int update_memory_limits(struct cgroup *cg) {
+ char **filename = &cg->filename_memory_limit;
+ const RRDSETVAR_ACQUIRED **chart_var = &cg->chart_var_memory_limit;
+ unsigned long long *value = &cg->memory_limit;
+
if(*filename) {
if(unlikely(!*chart_var)) {
- *chart_var = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_mem_usage, chart_var_name);
+ *chart_var = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_mem_usage, "memory_limit");
if(!*chart_var) {
- collector_error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, chart_var_name);
+ collector_error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, "memory_limit");
freez(*filename);
*filename = NULL;
}
@@ -3301,7 +1301,7 @@ static inline int update_memory_limits(char **filename, const RRDSETVAR_ACQUIRED
*filename = NULL;
}
else {
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024)));
+ rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
return 1;
}
} else {
@@ -3316,11 +1316,11 @@ static inline int update_memory_limits(char **filename, const RRDSETVAR_ACQUIRED
char *s = "max\n\0";
if(strcmp(s, buffer) == 0){
*value = UINT64_MAX;
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024)));
+ rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
return 1;
}
*value = str2ull(buffer, NULL);
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value / (1024 * 1024)));
+ rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
return 1;
}
}
@@ -3328,85 +1328,65 @@ static inline int update_memory_limits(char **filename, const RRDSETVAR_ACQUIRED
return 0;
}
-void update_cgroup_charts(int update_every) {
- netdata_log_debug(D_CGROUP, "updating cgroups charts");
-
- char type[RRD_ID_LENGTH_MAX + 1];
- char title[CHART_TITLE_MAX + 1];
-
- int services_do_cpu = 0,
- services_do_mem_usage = 0,
- services_do_mem_detailed = 0,
- services_do_mem_failcnt = 0,
- services_do_swap_usage = 0,
- services_do_io = 0,
- services_do_io_ops = 0,
- services_do_throttle_io = 0,
- services_do_throttle_ops = 0,
- services_do_queued_ops = 0,
- services_do_merged_ops = 0;
+// ----------------------------------------------------------------------------
+// generate charts
- struct cgroup *cg;
- for(cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->enabled || cg->pending_renames))
+void update_cgroup_systemd_services_charts() {
+ for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) {
+ if (unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg)))
continue;
- if(likely(cgroup_enable_systemd_services && is_cgroup_systemd_service(cg))) {
- if(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES) services_do_cpu++;
-
- if(cgroup_enable_systemd_services_detailed_memory && cg->memory.updated_detailed && cg->memory.enabled_detailed) services_do_mem_detailed++;
- if(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_mem_usage++;
- if(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES) services_do_mem_failcnt++;
- if(cg->memory.updated_msw_usage_in_bytes && cg->memory.enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_YES) services_do_swap_usage++;
+ if (likely(cg->cpuacct_stat.updated)) {
+ update_cpu_utilization_chart(cg);
+ }
+ if (likely(cg->memory.updated_msw_usage_in_bytes)) {
+ update_mem_usage_chart(cg);
+ }
+ if (likely(cg->memory.updated_failcnt)) {
+ update_mem_failcnt_chart(cg);
+ }
+ if (likely(cg->memory.updated_detailed)) {
+ update_mem_usage_detailed_chart(cg);
+ update_mem_writeback_chart(cg);
+ update_mem_pgfaults_chart(cg);
+ if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ update_mem_activity_chart(cg);
+ }
+ }
+ if (likely(cg->io_service_bytes.updated)) {
+ update_io_serviced_bytes_chart(cg);
+ }
+ if (likely(cg->io_serviced.updated)) {
+ update_io_serviced_ops_chart(cg);
+ }
+ if (likely(cg->throttle_io_service_bytes.updated)) {
+ update_throttle_io_serviced_bytes_chart(cg);
+ }
+ if (likely(cg->throttle_io_serviced.updated)) {
+ update_throttle_io_serviced_ops_chart(cg);
+ }
+ if (likely(cg->io_queued.updated)) {
+ update_io_queued_ops_chart(cg);
+ }
+ if (likely(cg->io_merged.updated)) {
+ update_io_merged_ops_chart(cg);
+ }
- if(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_io++;
- if(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_io_ops++;
- if(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_io++;
- if(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES) services_do_throttle_ops++;
- if(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES) services_do_queued_ops++;
- if(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES) services_do_merged_ops++;
- continue;
+ if (likely(cg->pids.pids_current_updated)) {
+ update_pids_current_chart(cg);
}
- type[0] = '\0';
-
- if(likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_cpu)) {
- snprintfz(
- title,
- CHART_TITLE_MAX,
- k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)");
-
- cg->st_cpu = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu" : "cgroup.cpu"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_update_rrdlabels(cg->st_cpu, cg->chart_labels);
+ cg->function_ready = true;
+ }
+}
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- }
- else {
- rrddim_add(cg->st_cpu, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_cpu, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- }
- }
+void update_cgroup_charts() {
+ for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) {
+ if(unlikely(!cg->enabled || cg->pending_renames || is_cgroup_systemd_service(cg)))
+ continue;
- rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
- rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
- rrdset_done(cg->st_cpu);
+ if (likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
+ update_cpu_utilization_chart(cg);
if(likely(cg->filename_cpuset_cpus || cg->filename_cpu_cfs_period || cg->filename_cpu_cfs_quota)) {
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
@@ -3428,8 +1408,7 @@ void update_cgroup_charts(int update_every) {
if(cg->filename_cpu_cfs_quota) freez(cg->filename_cpu_cfs_quota);
cg->filename_cpu_cfs_quota = NULL;
}
- }
- else {
+ } else {
NETDATA_DOUBLE value = 0, quota = 0;
if(likely( ((!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) && (cg->filename_cpuset_cpus || (cg->filename_cpu_cfs_period && cg->filename_cpu_cfs_quota)))
@@ -3443,49 +1422,10 @@ void update_cgroup_charts(int update_every) {
value = (NETDATA_DOUBLE)cg->cpuset_cpus * 100;
}
if(likely(value)) {
- if(unlikely(!cg->st_cpu_limit)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits");
-
- cg->st_cpu_limit = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_limit"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_limit" : "cgroup.cpu_limit"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority - 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_cpu_limit, cg->chart_labels);
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED))
- rrddim_add(cg->st_cpu_limit, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE);
- else
- rrddim_add(cg->st_cpu_limit, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- cg->prev_cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
- }
-
- NETDATA_DOUBLE cpu_usage = 0;
- cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
- NETDATA_DOUBLE cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (value * update_every);
-
- rrdset_isnot_obsolete(cg->st_cpu_limit);
-
- rrddim_set(cg->st_cpu_limit, "used", (cpu_used > 0)?cpu_used:0);
-
- cg->prev_cpu_usage = cpu_usage;
-
- rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, value);
- rrdset_done(cg->st_cpu_limit);
- }
- else {
- if(unlikely(cg->st_cpu_limit)) {
- rrdset_is_obsolete(cg->st_cpu_limit);
+ update_cpu_utilization_limit_chart(cg, value);
+ } else {
+ if (unlikely(cg->st_cpu_limit)) {
+ rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit);
cg->st_cpu_limit = NULL;
}
rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, NAN);
@@ -3495,1056 +1435,137 @@ void update_cgroup_charts(int update_every) {
}
if (likely(cg->cpuacct_cpu_throttling.updated && cg->cpuacct_cpu_throttling.enabled == CONFIG_BOOLEAN_YES)) {
- if (unlikely(!cg->st_cpu_nr_throttled)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Runnable Periods");
-
- cg->st_cpu_nr_throttled = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "throttled"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.throttled" : "cgroup.throttled"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_cpu_nr_throttled, cg->chart_labels);
- rrddim_add(cg->st_cpu_nr_throttled, "throttled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- } else {
- rrddim_set(cg->st_cpu_nr_throttled, "throttled", cg->cpuacct_cpu_throttling.nr_throttled_perc);
- rrdset_done(cg->st_cpu_nr_throttled);
- }
-
- if (unlikely(!cg->st_cpu_throttled_time)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Time Duration");
-
- cg->st_cpu_throttled_time = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "throttled_duration"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.throttled_duration" : "cgroup.throttled_duration"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 15
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_cpu_throttled_time, cg->chart_labels);
- rrddim_add(cg->st_cpu_throttled_time, "duration", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
- } else {
- rrddim_set(cg->st_cpu_throttled_time, "duration", cg->cpuacct_cpu_throttling.throttled_time);
- rrdset_done(cg->st_cpu_throttled_time);
- }
+ update_cpu_throttled_chart(cg);
+ update_cpu_throttled_duration_chart(cg);
}
if (likely(cg->cpuacct_cpu_shares.updated && cg->cpuacct_cpu_shares.enabled == CONFIG_BOOLEAN_YES)) {
- if (unlikely(!cg->st_cpu_shares)) {
- snprintfz(title, CHART_TITLE_MAX, "CPU Time Relative Share");
-
- cg->st_cpu_shares = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_shares"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_shares" : "cgroup.cpu_shares"
- , title
- , "shares"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 20
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_cpu_shares, cg->chart_labels);
- rrddim_add(cg->st_cpu_shares, "shares", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- } else {
- rrddim_set(cg->st_cpu_shares, "shares", cg->cpuacct_cpu_shares.shares);
- rrdset_done(cg->st_cpu_shares);
- }
+ update_cpu_shares_chart(cg);
}
- if(likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- unsigned int i;
-
- if(unlikely(!cg->st_cpu_per_core)) {
- snprintfz(
- title,
- CHART_TITLE_MAX,
- k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU) Per Core" :
- "CPU Usage (100%% = 1 core) Per Core");
-
- cg->st_cpu_per_core = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_per_core"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_per_core" : "cgroup.cpu_per_core"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 100
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_update_rrdlabels(cg->st_cpu_per_core, cg->chart_labels);
-
- for(i = 0; i < cg->cpuacct_usage.cpus; i++) {
- snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
- rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
- }
- }
-
- for(i = 0; i < cg->cpuacct_usage.cpus ;i++) {
- snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
- rrddim_set(cg->st_cpu_per_core, id, cg->cpuacct_usage.cpu_percpu[i]);
- }
- rrdset_done(cg->st_cpu_per_core);
+ if (likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
+ update_cpu_per_core_usage_chart(cg);
}
- if(likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_mem)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Usage");
-
- cg->st_mem = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem" : "cgroup.mem"
- , title
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 220
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_update_rrdlabels(cg->st_mem, cg->chart_labels);
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- if(cg->memory.detailed_has_swap)
- rrddim_add(cg->st_mem, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- } else {
- rrddim_add(cg->st_mem, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
- }
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set(cg->st_mem, "cache", cg->memory.total_cache);
- rrddim_set(cg->st_mem, "rss", (cg->memory.total_rss > cg->memory.total_rss_huge)?(cg->memory.total_rss - cg->memory.total_rss_huge):0);
-
- if(cg->memory.detailed_has_swap)
- rrddim_set(cg->st_mem, "swap", cg->memory.total_swap);
-
- rrddim_set(cg->st_mem, "rss_huge", cg->memory.total_rss_huge);
- rrddim_set(cg->st_mem, "mapped_file", cg->memory.total_mapped_file);
- } else {
- rrddim_set(cg->st_mem, "anon", cg->memory.anon);
- rrddim_set(cg->st_mem, "kernel_stack", cg->memory.kernel_stack);
- rrddim_set(cg->st_mem, "slab", cg->memory.slab);
- rrddim_set(cg->st_mem, "sock", cg->memory.sock);
- rrddim_set(cg->st_mem, "anon_thp", cg->memory.anon_thp);
- rrddim_set(cg->st_mem, "file", cg->memory.total_mapped_file);
- }
- rrdset_done(cg->st_mem);
-
- if(unlikely(!cg->st_writeback)) {
- snprintfz(title, CHART_TITLE_MAX, "Writeback Memory");
-
- cg->st_writeback = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "writeback"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.writeback" : "cgroup.writeback"
- , title
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 300
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_update_rrdlabels(cg->st_writeback, cg->chart_labels);
-
- if(cg->memory.detailed_has_dirty)
- rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if(cg->memory.detailed_has_dirty)
- rrddim_set(cg->st_writeback, "dirty", cg->memory.total_dirty);
-
- rrddim_set(cg->st_writeback, "writeback", cg->memory.total_writeback);
- rrdset_done(cg->st_writeback);
+ if (likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
+ update_mem_usage_detailed_chart(cg);
+ update_mem_writeback_chart(cg);
if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- if(unlikely(!cg->st_mem_activity)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Activity");
-
- cg->st_mem_activity = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_activity"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_activity" : "cgroup.mem_activity"
- , title
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 400
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_mem_activity, cg->chart_labels);
-
- rrddim_add(cg->st_mem_activity, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_mem_activity, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_mem_activity, "pgpgin", cg->memory.total_pgpgin);
- rrddim_set(cg->st_mem_activity, "pgpgout", cg->memory.total_pgpgout);
- rrdset_done(cg->st_mem_activity);
- }
-
- if(unlikely(!cg->st_pgfaults)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults");
-
- cg->st_pgfaults = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "pgfaults"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.pgfaults" : "cgroup.pgfaults"
- , title
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 500
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_pgfaults, cg->chart_labels);
-
- rrddim_add(cg->st_pgfaults, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_pgfaults, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ update_mem_activity_chart(cg);
}
- rrddim_set(cg->st_pgfaults, "pgfault", cg->memory.total_pgfault);
- rrddim_set(cg->st_pgfaults, "pgmajfault", cg->memory.total_pgmajfault);
- rrdset_done(cg->st_pgfaults);
+ update_mem_pgfaults_chart(cg);
}
- if(likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_mem_usage)) {
- snprintfz(title, CHART_TITLE_MAX, "Used Memory");
-
- cg->st_mem_usage = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_usage"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage" : "cgroup.mem_usage"
- , title
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 210
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_update_rrdlabels(cg->st_mem_usage, cg->chart_labels);
-
- rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes);
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set(
- cg->st_mem_usage,
- "swap",
- cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
- cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0);
- } else {
- rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes);
- }
- rrdset_done(cg->st_mem_usage);
-
- if (likely(update_memory_limits(&cg->filename_memory_limit, &cg->chart_var_memory_limit, &cg->memory_limit, "memory_limit", cg))) {
- static unsigned long long ram_total = 0;
-
- if(unlikely(!ram_total)) {
- procfile *ff = NULL;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
- ff = procfile_open(config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
-
- if(likely(ff))
- ff = procfile_readall(ff);
- if(likely(ff && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8)))
- ram_total = str2ull(procfile_word(ff, 1), NULL) * 1024;
- else {
- collector_error("Cannot read file %s. Will not update cgroup %s RAM limit anymore.", filename, cg->id);
- freez(cg->filename_memory_limit);
- cg->filename_memory_limit = NULL;
- }
+ if (likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
+ update_mem_usage_chart(cg);
- procfile_close(ff);
- }
-
- if(likely(ram_total)) {
- unsigned long long memory_limit = ram_total;
+ // FIXME: this if should be only for unlimited charts
+ if(likely(host_ram_total)) {
+ // FIXME: do we need to update mem limits on every data collection?
+ if (likely(update_memory_limits(cg))) {
- if(unlikely(cg->memory_limit < ram_total))
+ unsigned long long memory_limit = host_ram_total;
+ if (unlikely(cg->memory_limit < host_ram_total))
memory_limit = cg->memory_limit;
- if(unlikely(!cg->st_mem_usage_limit)) {
- snprintfz(title, CHART_TITLE_MAX, "Used RAM within the limits");
-
- cg->st_mem_usage_limit = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_usage_limit"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage_limit": "cgroup.mem_usage_limit"
- , title
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 200
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_update_rrdlabels(cg->st_mem_usage_limit, cg->chart_labels);
-
- rrddim_add(cg->st_mem_usage_limit, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_mem_usage_limit, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrdset_isnot_obsolete(cg->st_mem_usage_limit);
-
- rrddim_set(cg->st_mem_usage_limit, "available", memory_limit - cg->memory.usage_in_bytes);
- rrddim_set(cg->st_mem_usage_limit, "used", cg->memory.usage_in_bytes);
- rrdset_done(cg->st_mem_usage_limit);
-
- if (unlikely(!cg->st_mem_utilization)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Utilization");
-
- cg->st_mem_utilization = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_utilization"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_utilization" : "cgroup.mem_utilization"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 199
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_update_rrdlabels(cg->st_mem_utilization, cg->chart_labels);
-
- rrddim_add(cg->st_mem_utilization, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ update_mem_usage_limit_chart(cg, memory_limit);
+ update_mem_utilization_chart(cg, memory_limit);
+ } else {
+ if (unlikely(cg->st_mem_usage_limit)) {
+ rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit);
+ cg->st_mem_usage_limit = NULL;
}
- if (memory_limit) {
- rrdset_isnot_obsolete(cg->st_mem_utilization);
-
- rrddim_set(
- cg->st_mem_utilization, "utilization", cg->memory.usage_in_bytes * 100 / memory_limit);
- rrdset_done(cg->st_mem_utilization);
+ if (unlikely(cg->st_mem_utilization)) {
+ rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization);
+ cg->st_mem_utilization = NULL;
}
}
}
- else {
- if(unlikely(cg->st_mem_usage_limit)) {
- rrdset_is_obsolete(cg->st_mem_usage_limit);
- cg->st_mem_usage_limit = NULL;
- }
-
- if(unlikely(cg->st_mem_utilization)) {
- rrdset_is_obsolete(cg->st_mem_utilization);
- cg->st_mem_utilization = NULL;
- }
- }
-
- update_memory_limits(&cg->filename_memoryswap_limit, &cg->chart_var_memoryswap_limit, &cg->memoryswap_limit, "memory_and_swap_limit", cg);
}
- if(likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_mem_failcnt)) {
- snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures");
-
- cg->st_mem_failcnt = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_failcnt"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.mem_failcnt" : "cgroup.mem_failcnt"
- , title
- , "count"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 250
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_mem_failcnt, cg->chart_labels);
-
- rrddim_add(cg->st_mem_failcnt, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_mem_failcnt, "failures", cg->memory.failcnt);
- rrdset_done(cg->st_mem_failcnt);
+ if (likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
+ update_mem_failcnt_chart(cg);
}
- if(likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_io)) {
- snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks)");
-
- cg->st_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "io"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.io" : "cgroup.io"
- , title
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 1200
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_update_rrdlabels(cg->st_io, cg->chart_labels);
-
- rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
- rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
- rrdset_done(cg->st_io);
+ if (likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ update_io_serviced_bytes_chart(cg);
}
- if(likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_serviced_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks)");
-
- cg->st_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "serviced_ops"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.serviced_ops" : "cgroup.serviced_ops"
- , title
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 1200
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_serviced_ops, cg->chart_labels);
-
- rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
- rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
- rrdset_done(cg->st_serviced_ops);
+ if (likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ update_io_serviced_ops_chart(cg);
}
- if(likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_throttle_io)) {
- snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks)");
-
- cg->st_throttle_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "throttle_io"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_io" : "cgroup.throttle_io"
- , title
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 1200
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_update_rrdlabels(cg->st_throttle_io, cg->chart_labels);
-
- rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
- rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
- rrdset_done(cg->st_throttle_io);
+ if (likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
+ update_throttle_io_serviced_bytes_chart(cg);
}
- if(likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_throttle_serviced_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks)");
-
- cg->st_throttle_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "throttle_serviced_ops"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_serviced_ops" : "cgroup.throttle_serviced_ops"
- , title
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 1200
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_throttle_serviced_ops, cg->chart_labels);
-
- rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
- rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
- rrdset_done(cg->st_throttle_serviced_ops);
+ if (likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
+ update_throttle_io_serviced_ops_chart(cg);
}
- if(likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_queued_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks)");
-
- cg->st_queued_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "queued_ops"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.queued_ops" : "cgroup.queued_ops"
- , title
- , "operations"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2000
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_queued_ops, cg->chart_labels);
-
- rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
- rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
- rrdset_done(cg->st_queued_ops);
+ if (likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
+ update_io_queued_ops_chart(cg);
}
- if(likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
- if(unlikely(!cg->st_merged_ops)) {
- snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks)");
-
- cg->st_merged_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "merged_ops"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.merged_ops" : "cgroup.merged_ops"
- , title
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2100
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(cg->st_merged_ops, cg->chart_labels);
-
- rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
+ if (likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
+ update_io_merged_ops_chart(cg);
+ }
- rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
- rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
- rrdset_done(cg->st_merged_ops);
+ if (likely(cg->pids.pids_current_updated)) {
+ update_pids_current_chart(cg);
}
if (cg->options & CGROUP_OPTIONS_IS_UNIFIED) {
- struct pressure *res = &cg->cpu_pressure;
-
- if (likely(res->updated && res->some.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->some;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "CPU some pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_some_pressure"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure" : "cgroup.cpu_some_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2200
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "CPU some pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_some_pressure_stall_time"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure_stall_time" : "cgroup.cpu_some_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2220
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- update_pressure_charts(pcs);
- }
- if (likely(res->updated && res->full.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->full;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "CPU full pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_full_pressure"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure" : "cgroup.cpu_full_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2240
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "CPU full pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "cpu_full_pressure_stall_time"
- , NULL
- , "cpu"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure_stall_time" : "cgroup.cpu_full_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2260
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- update_pressure_charts(pcs);
- }
-
- res = &cg->memory_pressure;
-
- if (likely(res->updated && res->some.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->some;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "Memory some pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_some_pressure"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure" : "cgroup.memory_some_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2300
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "Memory some pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "memory_some_pressure_stall_time"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure_stall_time" : "cgroup.memory_some_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2320
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- update_pressure_charts(pcs);
- }
-
- if (likely(res->updated && res->full.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->full;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "Memory full pressure");
-
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "mem_full_pressure"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure" : "cgroup.memory_full_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2340
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "Memory full pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "memory_full_pressure_stall_time"
- , NULL
- , "mem"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure_stall_time" : "cgroup.memory_full_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2360
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- update_pressure_charts(pcs);
- }
-
- res = &cg->irq_pressure;
-
- if (likely(res->updated && res->some.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->some;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "irq_some_pressure"
- , NULL
- , "interrupts"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2310
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "irq_some_pressure_stall_time"
- , NULL
- , "interrupts"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2330
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- update_pressure_charts(pcs);
+ if (likely(cg->cpu_pressure.updated)) {
+ if (cg->cpu_pressure.some.enabled) {
+ update_cpu_some_pressure_chart(cg);
+ update_cpu_some_pressure_stall_time_chart(cg);
+ }
+ if (cg->cpu_pressure.full.enabled) {
+ update_cpu_full_pressure_chart(cg);
+ update_cpu_full_pressure_stall_time_chart(cg);
+ }
}
- if (likely(res->updated && res->full.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->full;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure");
-
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "irq_full_pressure"
- , NULL
- , "interrupts"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2350
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(cg->memory_pressure.updated)) {
+ if (cg->memory_pressure.some.enabled) {
+ update_mem_some_pressure_chart(cg);
+ update_mem_some_pressure_stall_time_chart(cg);
}
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "irq_full_pressure_stall_time"
- , NULL
- , "interrupts"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2370
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (cg->memory_pressure.full.enabled) {
+ update_mem_full_pressure_chart(cg);
+ update_mem_full_pressure_stall_time_chart(cg);
}
-
- update_pressure_charts(pcs);
}
- res = &cg->io_pressure;
-
- if (likely(res->updated && res->some.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->some;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "I/O some pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "io_some_pressure"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure" : "cgroup.io_some_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2400
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(cg->irq_pressure.updated)) {
+ if (cg->irq_pressure.some.enabled) {
+ update_irq_some_pressure_chart(cg);
+ update_irq_some_pressure_stall_time_chart(cg);
}
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "I/O some pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "io_some_pressure_stall_time"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure_stall_time" : "cgroup.io_some_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2420
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (cg->irq_pressure.full.enabled) {
+ update_irq_full_pressure_chart(cg);
+ update_irq_full_pressure_stall_time_chart(cg);
}
-
- update_pressure_charts(pcs);
}
- if (likely(res->updated && res->full.enabled)) {
- struct pressure_charts *pcs;
- pcs = &res->full;
-
- if (unlikely(!pcs->share_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "I/O full pressure");
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "io_full_pressure"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure" : "cgroup.io_full_pressure"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2440
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(cg->io_pressure.updated)) {
+ if (cg->io_pressure.some.enabled) {
+ update_io_some_pressure_chart(cg);
+ update_io_some_pressure_stall_time_chart(cg);
}
-
- if (unlikely(!pcs->total_time.st)) {
- RRDSET *chart;
- snprintfz(title, CHART_TITLE_MAX, "I/O full pressure stall time");
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg)
- , "io_full_pressure_stall_time"
- , NULL
- , "disk"
- , k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure_stall_time" : "cgroup.io_full_pressure_stall_time"
- , title
- , "ms"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
- , cgroup_containers_chart_priority + 2460
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (cg->io_pressure.full.enabled) {
+ update_io_full_pressure_chart(cg);
+ update_io_full_pressure_stall_time_chart(cg);
}
-
- update_pressure_charts(pcs);
}
}
- }
-
- if(likely(cgroup_enable_systemd_services))
- update_systemd_services_charts(update_every, services_do_cpu, services_do_mem_usage, services_do_mem_detailed
- , services_do_mem_failcnt, services_do_swap_usage, services_do_io
- , services_do_io_ops, services_do_throttle_io, services_do_throttle_ops
- , services_do_queued_ops, services_do_merged_ops
- );
- netdata_log_debug(D_CGROUP, "done updating cgroups charts");
+ cg->function_ready = true;
+ }
}
// ----------------------------------------------------------------------------
@@ -4587,6 +1608,22 @@ static void cgroup_main_cleanup(void *ptr) {
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
+void cgroup_read_host_total_ram() {
+ procfile *ff = NULL;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
+
+ ff = procfile_open(
+ config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+
+ if (likely((ff = procfile_readall(ff)) && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8)))
+ host_ram_total = str2ull(procfile_word(ff, 1), NULL) * 1024;
+ else
+ collector_error("Cannot read file %s. Will not create RAM limit charts.", filename);
+
+ procfile_close(ff);
+}
+
void *cgroups_main(void *ptr) {
worker_register("CGROUPS");
worker_register_job_name(WORKER_CGROUPS_LOCK, "lock");
@@ -4601,6 +1638,9 @@ void *cgroups_main(void *ptr) {
}
read_cgroup_plugin_configuration();
+
+ cgroup_read_host_total_ram();
+
netdata_cgroup_ebpf_initialize_shm();
if (uv_mutex_init(&cgroup_root_mutex)) {
@@ -4624,7 +1664,15 @@ void *cgroups_main(void *ptr) {
collector_error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
goto exit;
}
- uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]");
+
+ uv_thread_set_name_np(discovery_thread.thread, "P[cgroups]");
+
+ // we register this only on localhost
+ // for the other nodes, the origin server should register it
+ rrd_collector_started(); // this creates a collector that runs for as long as netdata runs
+ cgroup_netdev_link_init();
+ rrd_function_add(localhost, NULL, "containers-vms", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_cgroup_top, NULL);
+ rrd_function_add(localhost, NULL, "systemd-services", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_systemd_top, NULL);
heartbeat_t hb;
heartbeat_init(&hb);
@@ -4632,11 +1680,13 @@ void *cgroups_main(void *ptr) {
usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
netdata_thread_disable_cancelability();
+
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
usec_t hb_dt = heartbeat_next(&hb, step);
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
+ if (unlikely(!service_running(SERVICE_COLLECTORS)))
+ break;
find_dt += hb_dt;
if (unlikely(find_dt >= find_every || (!is_inside_k8s && cgroups_check))) {
@@ -4652,12 +1702,18 @@ void *cgroups_main(void *ptr) {
worker_is_busy(WORKER_CGROUPS_READ);
read_all_discovered_cgroups(cgroup_root);
+
if (unlikely(!service_running(SERVICE_COLLECTORS))) {
uv_mutex_unlock(&cgroup_root_mutex);
break;
}
+
worker_is_busy(WORKER_CGROUPS_CHART);
- update_cgroup_charts(cgroup_update_every);
+
+ update_cgroup_charts();
+ if (cgroup_enable_systemd_services)
+ update_cgroup_systemd_services_charts();
+
if (unlikely(!service_running(SERVICE_COLLECTORS))) {
uv_mutex_unlock(&cgroup_root_mutex);
break;
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
index 625be755d..e8cfcf5f6 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.h
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -5,6 +5,10 @@
#include "daemon/common.h"
+#define PLUGIN_CGROUPS_NAME "cgroups.plugin"
+#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd"
+#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup"
+
#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001
#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004
diff --git a/collectors/cgroups.plugin/tests/test_doubles.c b/collectors/cgroups.plugin/tests/test_doubles.c
index 498f649f5..b13d4b19c 100644
--- a/collectors/cgroups.plugin/tests/test_doubles.c
+++ b/collectors/cgroups.plugin/tests/test_doubles.c
@@ -2,12 +2,12 @@
#include "test_cgroups_plugin.h"
-void rrdset_is_obsolete(RRDSET *st)
+void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st)
{
UNUSED(st);
}
-void rrdset_isnot_obsolete(RRDSET *st)
+void rrdset_isnot_obsolete___safe_from_collector_thread(RRDSET *st)
{
UNUSED(st);
}