Diffstat (limited to 'collectors/cgroups.plugin')
 collectors/cgroups.plugin/README.md                             |    8
 collectors/cgroups.plugin/cgroup-name.sh                        |   26
 collectors/cgroups.plugin/cgroup-network-helper.sh              |   26
 collectors/cgroups.plugin/cgroup-network.c                      |    8
 collectors/cgroups.plugin/integrations/containers.md            |  166
 collectors/cgroups.plugin/integrations/kubernetes_containers.md |  184
 collectors/cgroups.plugin/integrations/libvirt_containers.md    |  166
 collectors/cgroups.plugin/integrations/lxc_containers.md        |  166
 collectors/cgroups.plugin/integrations/ovirt_containers.md      |  166
 collectors/cgroups.plugin/integrations/proxmox_containers.md    |  166
 collectors/cgroups.plugin/integrations/systemd_services.md      |  110
 collectors/cgroups.plugin/integrations/virtual_machines.md      |  166
 collectors/cgroups.plugin/metadata.yaml                         |  204
 collectors/cgroups.plugin/sys_fs_cgroup.c                       | 1202
 collectors/cgroups.plugin/sys_fs_cgroup.h                       |    2
 collectors/cgroups.plugin/tests/test_cgroups_plugin.c           |    5
 16 files changed, 1861 insertions(+), 910 deletions(-)
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
index 2e4fff230..ba6a20e5e 100644
--- a/collectors/cgroups.plugin/README.md
+++ b/collectors/cgroups.plugin/README.md
@@ -139,10 +139,10 @@ chart instead of `auto` to enable it permanently. For example:
You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero
metrics for all internal Netdata plugins.
-### Alarms
+### Alerts
-CPU and memory limits are watched and used to rise alarms. Memory usage for every cgroup is checked against `ram`
-and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alarms is available in `health.d/cgroups.conf`
+CPU and memory limits are watched and used to raise alerts. Memory usage for every cgroup is checked against the `ram`
+and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and the `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned to the cgroup. Configuration for the alerts is available in the `health.d/cgroups.conf`
file.
## Monitoring systemd services
@@ -264,7 +264,7 @@ Network interfaces and cgroups (containers) are self-cleaned. When a network int
a few errors in error.log complaining about files it cannot find, but immediately:
1. It will detect this is a removed container or network interface
-2. It will freeze/pause all alarms for them
+2. It will freeze/pause all alerts for them
3. It will mark their charts as obsolete
4. Obsolete charts are not offered on new dashboard sessions (so hit F5 and the charts are gone)
5. Existing dashboard sessions will continue to see them, but of course they will not refresh
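
The alert logic described in the README hunk above derives a cgroup's CPU limit from the CFS quota/period pair. A minimal sketch of that computation, assuming a cgroup v1 hierarchy mounted in the usual place (the path below is a hypothetical example, not part of this patch):

```sh
# Sketch: deriving the effective CPU limit the CPU alert compares against,
# from the two CFS knobs named in the README.
CG="/sys/fs/cgroup/cpu/docker/example"   # hypothetical cgroup v1 path
quota=$(cat "$CG/cpu.cfs_quota_us")      # -1 means "no limit"
period=$(cat "$CG/cpu.cfs_period_us")    # typically 100000 (100 ms)
if [ "$quota" -gt 0 ]; then
    # e.g. quota=200000 with period=100000 -> 200% = two full cores
    echo "CPU limit: $((100 * quota / period))%"
else
    echo "CPU limit: none"
fi
```

With `quota=50000` and the default `period=100000`, the cgroup is capped at 50% of one core, and that ceiling is what the alert's utilization percentage is measured against.
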
diff --git a/collectors/cgroups.plugin/cgroup-name.sh b/collectors/cgroups.plugin/cgroup-name.sh
index 6edd9d9f0..c0f3d0cb6 100755
--- a/collectors/cgroups.plugin/cgroup-name.sh
+++ b/collectors/cgroups.plugin/cgroup-name.sh
@@ -16,6 +16,21 @@ export LC_ALL=C
PROGRAM_NAME="$(basename "${0}")"
+LOG_LEVEL_ERR=1
+LOG_LEVEL_WARN=2
+LOG_LEVEL_INFO=3
+LOG_LEVEL="$LOG_LEVEL_INFO"
+
+set_log_severity_level() {
+ case ${NETDATA_LOG_SEVERITY_LEVEL,,} in
+ "info") LOG_LEVEL="$LOG_LEVEL_INFO";;
+ "warn" | "warning") LOG_LEVEL="$LOG_LEVEL_WARN";;
+ "err" | "error") LOG_LEVEL="$LOG_LEVEL_ERR";;
+ esac
+}
+
+set_log_severity_level
+
logdate() {
date "+%Y-%m-%d %H:%M:%S"
}
@@ -28,18 +43,21 @@ log() {
}
+info() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_INFO" -gt "$LOG_LEVEL" ]] && return
+ log INFO "${@}"
+}
+
warning() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_WARN" -gt "$LOG_LEVEL" ]] && return
log WARNING "${@}"
}
error() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_ERR" -gt "$LOG_LEVEL" ]] && return
log ERROR "${@}"
}
-info() {
- log INFO "${@}"
-}
-
fatal() {
log FATAL "${@}"
exit 1
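
The hunk above reorders the logging helpers so each one can bail out early when its severity is below the configured threshold (`error` is the lowest numeric level, so `error()` always logs once a level is set, while `fatal()` is unconditional). A self-contained sketch of the same gating pattern, runnable outside Netdata:

```sh
#!/usr/bin/env bash
# Standalone demo of the severity gate added to cgroup-name.sh (illustrative,
# not the shipped script; requires bash 4+ for the ${var,,} lowercasing).
LOG_LEVEL_ERR=1
LOG_LEVEL_WARN=2
LOG_LEVEL_INFO=3
LOG_LEVEL="$LOG_LEVEL_INFO"

case "${NETDATA_LOG_SEVERITY_LEVEL,,}" in
  "info") LOG_LEVEL="$LOG_LEVEL_INFO" ;;
  "warn" | "warning") LOG_LEVEL="$LOG_LEVEL_WARN" ;;
  "err" | "error") LOG_LEVEL="$LOG_LEVEL_ERR" ;;
esac

info()    { [[ "$LOG_LEVEL_INFO" -gt "$LOG_LEVEL" ]] && return; echo "INFO: $*"; }
warning() { [[ "$LOG_LEVEL_WARN" -gt "$LOG_LEVEL" ]] && return; echo "WARNING: $*"; }
error()   { [[ "$LOG_LEVEL_ERR"  -gt "$LOG_LEVEL" ]] && return; echo "ERROR: $*"; }

info "shown only at the default info level"
warning "shown at the warn and info levels"
error "always shown"
```

Running it as `NETDATA_LOG_SEVERITY_LEVEL=error ./demo.sh` prints only the `ERROR:` line; with the variable unset, all three lines appear. The same block is duplicated verbatim in `cgroup-network-helper.sh` below.
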
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh
index 783332f73..008bc987f 100755
--- a/collectors/cgroups.plugin/cgroup-network-helper.sh
+++ b/collectors/cgroups.plugin/cgroup-network-helper.sh
@@ -31,6 +31,21 @@ export LC_ALL=C
PROGRAM_NAME="$(basename "${0}")"
+LOG_LEVEL_ERR=1
+LOG_LEVEL_WARN=2
+LOG_LEVEL_INFO=3
+LOG_LEVEL="$LOG_LEVEL_INFO"
+
+set_log_severity_level() {
+ case ${NETDATA_LOG_SEVERITY_LEVEL,,} in
+ "info") LOG_LEVEL="$LOG_LEVEL_INFO";;
+ "warn" | "warning") LOG_LEVEL="$LOG_LEVEL_WARN";;
+ "err" | "error") LOG_LEVEL="$LOG_LEVEL_ERR";;
+ esac
+}
+
+set_log_severity_level
+
logdate() {
date "+%Y-%m-%d %H:%M:%S"
}
@@ -43,18 +58,21 @@ log() {
}
+info() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_INFO" -gt "$LOG_LEVEL" ]] && return
+ log INFO "${@}"
+}
+
warning() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_WARN" -gt "$LOG_LEVEL" ]] && return
log WARNING "${@}"
}
error() {
+ [[ -n "$LOG_LEVEL" && "$LOG_LEVEL_ERR" -gt "$LOG_LEVEL" ]] && return
log ERROR "${@}"
}
-info() {
- log INFO "${@}"
-}
-
fatal() {
log FATAL "${@}"
exit 1
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
index a490df394..b00f246bb 100644
--- a/collectors/cgroups.plugin/cgroup-network.c
+++ b/collectors/cgroups.plugin/cgroup-network.c
@@ -11,9 +11,11 @@
#endif
char environment_variable2[FILENAME_MAX + 50] = "";
+char environment_variable3[FILENAME_MAX + 50] = "";
char *environment[] = {
"PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
environment_variable2,
+ environment_variable3,
NULL
};
@@ -671,6 +673,10 @@ int main(int argc, char **argv) {
// the first environment variable is a fixed PATH=
snprintfz(environment_variable2, sizeof(environment_variable2) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
+ char *s = getenv("NETDATA_LOG_SEVERITY_LEVEL");
+ if (s)
+ snprintfz(environment_variable3, sizeof(environment_variable3) - 1, "NETDATA_LOG_SEVERITY_LEVEL=%s", s);
+
// ------------------------------------------------------------------------
if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
@@ -680,6 +686,8 @@ int main(int argc, char **argv) {
if(argc != 3)
usage();
+
+ log_set_global_severity_for_external_plugins();
int arg = 1;
int helper = 1;
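
`cgroup-network` launches its helper with the fixed `environment[]` array shown above rather than the caller's full environment, so any variable the helper needs must be copied into that array explicitly; that is what the new `environment_variable3` slot does for `NETDATA_LOG_SEVERITY_LEVEL`. A shell illustration of why the copy is required, using `env -i` to simulate the scrubbed environment (demonstration only, not part of the patch):

```sh
export NETDATA_LOG_SEVERITY_LEVEL=error

# Without forwarding, a child started with a fixed environment never sees it:
env -i PATH=/bin:/usr/bin sh -c 'echo "child sees: ${NETDATA_LOG_SEVERITY_LEVEL:-unset}"'
# -> child sees: unset

# Forwarding it explicitly, as the new hunk does via environment_variable3:
env -i PATH=/bin:/usr/bin NETDATA_LOG_SEVERITY_LEVEL="$NETDATA_LOG_SEVERITY_LEVEL" \
    sh -c 'echo "child sees: ${NETDATA_LOG_SEVERITY_LEVEL:-unset}"'
# -> child sees: error
```
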
diff --git a/collectors/cgroups.plugin/integrations/containers.md b/collectors/cgroups.plugin/integrations/containers.md
new file mode 100644
index 000000000..6dec9ce2b
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/containers.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Containers
+
+
+<img src="https://netdata.cloud/img/container.svg" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Containers for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
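
The `cgroup_10s_received_packets_storm` alert in the table above fires on the ratio between the short-term and one-minute received-packet rates. The arithmetic, with made-up sample values:

```sh
rate_10s=30000  # pps averaged over the last 10 seconds (sample value)
rate_1m=1500    # pps averaged over the last minute (sample value)
echo "storm ratio: $((rate_10s / rate_1m))x"   # -> 20x: a sudden burst
```

A ratio near 1 means steady traffic; a large ratio indicates a sudden burst on `${label:device}`. The same alert pair is reused by the other container integrations generated in this changeset.
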
diff --git a/collectors/cgroups.plugin/integrations/kubernetes_containers.md b/collectors/cgroups.plugin/integrations/kubernetes_containers.md
new file mode 100644
index 000000000..4bfa55c6d
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/kubernetes_containers.md
@@ -0,0 +1,184 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/kubernetes_containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Kubernetes Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Kubernetes"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubernetes Containers
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Containers for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per k8s cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| k8s_namespace | TBD |
+| k8s_pod_name | TBD |
+| k8s_pod_uid | TBD |
+| k8s_controller_kind | TBD |
+| k8s_controller_name | TBD |
+| k8s_node_name | TBD |
+| k8s_container_name | TBD |
+| k8s_container_id | TBD |
+| k8s_kind | TBD |
+| k8s_qos_class | TBD |
+| k8s_cluster_id | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s.cgroup.cpu_limit | used | percentage |
+| k8s.cgroup.cpu | user, system | percentage |
+| k8s.cgroup.cpu_per_core | a dimension per core | percentage |
+| k8s.cgroup.throttled | throttled | percentage |
+| k8s.cgroup.throttled_duration | duration | ms |
+| k8s.cgroup.cpu_shares | shares | shares |
+| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| k8s.cgroup.writeback | dirty, writeback | MiB |
+| k8s.cgroup.mem_activity | in, out | MiB/s |
+| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |
+| k8s.cgroup.mem_usage | ram, swap | MiB |
+| k8s.cgroup.mem_usage_limit | available, used | MiB |
+| k8s.cgroup.mem_utilization | utilization | percentage |
+| k8s.cgroup.mem_failcnt | failures | count |
+| k8s.cgroup.io | read, write | KiB/s |
+| k8s.cgroup.serviced_ops | read, write | operations/s |
+| k8s.cgroup.throttle_io | read, write | KiB/s |
+| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |
+| k8s.cgroup.queued_ops | read, write | operations |
+| k8s.cgroup.merged_ops | read, write | operations/s |
+| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |
+| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |
+| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.memory_some_pressure_stall_time | time | ms |
+| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.memory_full_pressure_stall_time | time | ms |
+| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.io_some_pressure_stall_time | time | ms |
+| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| k8s.cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per k8s cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+| interface_type | TBD |
+| k8s_namespace | TBD |
+| k8s_pod_name | TBD |
+| k8s_pod_uid | TBD |
+| k8s_controller_kind | TBD |
+| k8s_controller_name | TBD |
+| k8s_node_name | TBD |
+| k8s_container_name | TBD |
+| k8s_container_id | TBD |
+| k8s_kind | TBD |
+| k8s_qos_class | TBD |
+| k8s_cluster_id | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s.cgroup.net_net | received, sent | kilobits/s |
+| k8s.cgroup.net_packets | received, sent, multicast | pps |
+| k8s.cgroup.net_errors | inbound, outbound | errors/s |
+| k8s.cgroup.net_drops | inbound, outbound | errors/s |
+| k8s.cgroup.net_fifo | receive, transmit | errors/s |
+| k8s.cgroup.net_compressed | receive, sent | pps |
+| k8s.cgroup.net_events | frames, collisions, carrier | events/s |
+| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| k8s.cgroup.net_carrier | up, down | state |
+| k8s.cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |
+| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/libvirt_containers.md b/collectors/cgroups.plugin/integrations/libvirt_containers.md
new file mode 100644
index 000000000..af0310b10
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/libvirt_containers.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/libvirt_containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Libvirt Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Libvirt Containers
+
+
+<img src="https://netdata.cloud/img/libvirt.png" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Libvirt for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/lxc_containers.md b/collectors/cgroups.plugin/integrations/lxc_containers.md
new file mode 100644
index 000000000..becc9ae17
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/lxc_containers.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/lxc_containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "LXC Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# LXC Containers
+
+
+<img src="https://netdata.cloud/img/lxc.png" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor LXC Containers for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/ovirt_containers.md b/collectors/cgroups.plugin/integrations/ovirt_containers.md
new file mode 100644
index 000000000..c9f6d74b7
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/ovirt_containers.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/ovirt_containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "oVirt Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# oVirt Containers
+
+
+<img src="https://netdata.cloud/img/ovirt.svg" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor oVirt for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/proxmox_containers.md b/collectors/cgroups.plugin/integrations/proxmox_containers.md
new file mode 100644
index 000000000..2caad5eac
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/proxmox_containers.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/proxmox_containers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Proxmox Containers"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Proxmox Containers
+
+
+<img src="https://netdata.cloud/img/proxmox.png" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Proxmox for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/systemd_services.md b/collectors/cgroups.plugin/integrations/systemd_services.md
new file mode 100644
index 000000000..b71060050
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/systemd_services.md
@@ -0,0 +1,110 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/systemd_services.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Systemd Services"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Systemd"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Systemd Services
+
+
+<img src="https://netdata.cloud/img/systemd.svg" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Containers for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per systemd service
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service_name | Service name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| systemd.service.cpu.utilization | user, system | percentage |
+| systemd.service.memory.usage | ram, swap | MiB |
+| systemd.service.memory.failcnt | fail | failures/s |
+| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |
+| systemd.service.memory.writeback | writeback, dirty | MiB |
+| systemd.service.memory.paging.faults | minor, major | MiB/s |
+| systemd.service.memory.paging.io | in, out | MiB/s |
+| systemd.service.disk.io | read, write | KiB/s |
+| systemd.service.disk.iops | read, write | operations/s |
+| systemd.service.disk.throttle.io | read, write | KiB/s |
+| systemd.service.disk.throttle.iops | read, write | operations/s |
+| systemd.service.disk.queued_iops | read, write | operations/s |
+| systemd.service.disk.merged_iops | read, write | operations/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/integrations/virtual_machines.md b/collectors/cgroups.plugin/integrations/virtual_machines.md
new file mode 100644
index 000000000..3bb79c128
--- /dev/null
+++ b/collectors/cgroups.plugin/integrations/virtual_machines.md
@@ -0,0 +1,166 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/virtual_machines.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
+sidebar_label: "Virtual Machines"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Virtual Machines
+
+
+<img src="https://netdata.cloud/img/container.svg" width="150"/>
+
+
+Plugin: cgroups.plugin
+Module: /sys/fs/cgroup
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Virtual Machines for performance, resource usage, and health status.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per cgroup
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.cpu_limit | used | percentage |
+| cgroup.cpu | user, system | percentage |
+| cgroup.cpu_per_core | a dimension per core | percentage |
+| cgroup.throttled | throttled | percentage |
+| cgroup.throttled_duration | duration | ms |
+| cgroup.cpu_shares | shares | shares |
+| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
+| cgroup.writeback | dirty, writeback | MiB |
+| cgroup.mem_activity | in, out | MiB/s |
+| cgroup.pgfaults | pgfault, swap | MiB/s |
+| cgroup.mem_usage | ram, swap | MiB |
+| cgroup.mem_usage_limit | available, used | MiB |
+| cgroup.mem_utilization | utilization | percentage |
+| cgroup.mem_failcnt | failures | count |
+| cgroup.io | read, write | KiB/s |
+| cgroup.serviced_ops | read, write | operations/s |
+| cgroup.throttle_io | read, write | KiB/s |
+| cgroup.throttle_serviced_ops | read, write | operations/s |
+| cgroup.queued_ops | read, write | operations |
+| cgroup.merged_ops | read, write | operations/s |
+| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_some_pressure_stall_time | time | ms |
+| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
+| cgroup.cpu_full_pressure_stall_time | time | ms |
+| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_some_pressure_stall_time | time | ms |
+| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
+| cgroup.memory_full_pressure_stall_time | time | ms |
+| cgroup.io_some_pressure | some10, some60, some300 | percentage |
+| cgroup.io_some_pressure_stall_time | time | ms |
+| cgroup.io_full_pressure | some10, some60, some300 | percentage |
+| cgroup.io_full_pressure_stall_time | time | ms |
+
+### Per cgroup network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | TBD |
+| image | TBD |
+| device | TBD |
+| interface_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cgroup.net_net | received, sent | kilobits/s |
+| cgroup.net_packets | received, sent, multicast | pps |
+| cgroup.net_errors | inbound, outbound | errors/s |
+| cgroup.net_drops | inbound, outbound | errors/s |
+| cgroup.net_fifo | receive, transmit | errors/s |
+| cgroup.net_compressed | receive, sent | pps |
+| cgroup.net_events | frames, collisions, carrier | events/s |
+| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| cgroup.net_carrier | up, down | state |
+| cgroup.net_mtu | mtu | octets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
+| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
+| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/cgroups.plugin/metadata.yaml b/collectors/cgroups.plugin/metadata.yaml
index b342d30a3..ec6228ea2 100644
--- a/collectors/cgroups.plugin/metadata.yaml
+++ b/collectors/cgroups.plugin/metadata.yaml
@@ -406,7 +406,7 @@ modules:
link: https://kubernetes.io/
icon_filename: kubernetes.svg
categories:
- - data-collection.containers-and-vms
+ #- data-collection.containers-and-vms
- data-collection.kubernetes
keywords:
- k8s
@@ -821,154 +821,104 @@ modules:
description: ""
availability: []
scopes:
- - name: global
+ - name: systemd service
description: ""
- labels: []
+ labels:
+ - name: service_name
+ description: Service name
metrics:
- - name: services.cpu
+ - name: systemd.service.cpu.utilization
description: Systemd Services CPU utilization (100% = 1 core)
- unit: "percentage"
+ unit: percentage
chart_type: stacked
dimensions:
- - name: a dimension per systemd service
- - name: services.mem_usage
+ - name: user
+ - name: system
+ - name: systemd.service.memory.usage
description: Systemd Services Used Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_rss
- description: Systemd Services RSS Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_mapped
- description: Systemd Services Mapped Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_cache
- description: Systemd Services Cache Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_writeback
- description: Systemd Services Writeback Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_pgfault
- description: Systemd Services Memory Minor Page Faults
- unit: "MiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_pgmajfault
- description: Systemd Services Memory Major Page Faults
- unit: "MiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_pgpgin
- description: Systemd Services Memory Charging Activity
- unit: "MiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.mem_pgpgout
- description: Systemd Services Memory Uncharging Activity
- unit: "MiB/s"
+ unit: MiB
chart_type: stacked
dimensions:
- - name: a dimension per systemd service
- - name: services.mem_failcnt
+ - name: ram
+ - name: swap
+ - name: systemd.service.memory.failcnt
description: Systemd Services Memory Limit Failures
- unit: "failures"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.swap_usage
- description: Systemd Services Swap Memory Used
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.io_read
- description: Systemd Services Disk Read Bandwidth
- unit: "KiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.io_write
- description: Systemd Services Disk Write Bandwidth
- unit: "KiB/s"
- chart_type: stacked
+ unit: failures/s
+ chart_type: line
dimensions:
- - name: a dimension per systemd service
- - name: services.io_ops_read
- description: Systemd Services Disk Read Operations
- unit: "operations/s"
+ - name: fail
+ - name: systemd.service.memory.ram.usage
+ description: Systemd Services Memory
+ unit: MiB
chart_type: stacked
dimensions:
- - name: a dimension per systemd service
- - name: services.io_ops_write
- description: Systemd Services Disk Write Operations
- unit: "operations/s"
+ - name: rss
+ - name: cache
+ - name: mapped_file
+ - name: rss_huge
+ - name: systemd.service.memory.writeback
+ description: Systemd Services Writeback Memory
+ unit: MiB
chart_type: stacked
dimensions:
- - name: a dimension per systemd service
- - name: services.throttle_io_read
- description: Systemd Services Throttle Disk Read Bandwidth
- unit: "KiB/s"
- chart_type: stacked
+ - name: writeback
+ - name: dirty
+ - name: systemd.service.memory.paging.faults
+ description: Systemd Services Memory Minor and Major Page Faults
+ unit: MiB/s
+ chart_type: area
dimensions:
- - name: a dimension per systemd service
- - name: services.services.throttle_io_write
- description: Systemd Services Throttle Disk Write Bandwidth
- unit: "KiB/s"
- chart_type: stacked
+ - name: minor
+ - name: major
+ - name: systemd.service.memory.paging.io
+ description: Systemd Services Memory Paging IO
+ unit: MiB/s
+ chart_type: area
dimensions:
- - name: a dimension per systemd service
- - name: services.throttle_io_ops_read
- description: Systemd Services Throttle Disk Read Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: in
+ - name: out
+ - name: systemd.service.disk.io
+ description: Systemd Services Disk Read/Write Bandwidth
+ unit: KiB/s
+ chart_type: area
dimensions:
- - name: a dimension per systemd service
- - name: throttle_io_ops_write
- description: Systemd Services Throttle Disk Write Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: read
+ - name: write
+ - name: systemd.service.disk.iops
+ description: Systemd Services Disk Read/Write Operations
+ unit: operations/s
+ chart_type: line
dimensions:
- - name: a dimension per systemd service
- - name: services.queued_io_ops_read
- description: Systemd Services Queued Disk Read Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: read
+ - name: write
+ - name: systemd.service.disk.throttle.io
+ description: Systemd Services Throttle Disk Read/Write Bandwidth
+ unit: KiB/s
+ chart_type: area
dimensions:
- - name: a dimension per systemd service
- - name: services.queued_io_ops_write
- description: Systemd Services Queued Disk Write Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: read
+ - name: write
+ - name: systemd.service.disk.throttle.iops
+ description: Systemd Services Throttle Disk Read/Write Operations
+ unit: operations/s
+ chart_type: line
dimensions:
- - name: a dimension per systemd service
- - name: services.merged_io_ops_read
- description: Systemd Services Merged Disk Read Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: read
+ - name: write
+ - name: systemd.service.disk.queued_iops
+ description: Systemd Services Queued Disk Read/Write Operations
+ unit: operations/s
+ chart_type: line
dimensions:
- - name: a dimension per systemd service
- - name: services.merged_io_ops_write
- description: Systemd Services Merged Disk Write Operations
- unit: "operations/s"
- chart_type: stacked
+ - name: read
+ - name: write
+ - name: systemd.service.disk.merged_iops
+ description: Systemd Services Merged Disk Read/Write Operations
+ unit: operations/s
+ chart_type: line
dimensions:
- - name: a dimension per systemd service
+ - name: read
+ - name: write
- <<: *module
meta:
<<: *meta
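Note: the metadata changes above replace the shared "services.*" charts (one dimension per systemd service) with per-service "systemd.service.*" charts that have fixed dimensions. A minimal sketch of the new pattern, using the chart and dimension names from the entries above and the rrdset/rrddim calls from the sys_fs_cgroup.c diff below (the type buffer, priority and loop context are as in update_systemd_services_charts there):

    /* one chart per service with a fixed "fail" dimension, instead of one
       shared chart with a dimension per service */
    if (unlikely(!cg->st_mem_failcnt)) {
        cg->st_mem_failcnt = rrdset_create_localhost(
            cgroup_chart_type(type, cg),  /* e.g. "systemd_nginx_service" (hypothetical) */
            "mem_failcnt", NULL, "mem",
            "systemd.service.memory.failcnt",
            "Systemd Services Memory Limit Failures", "failures/s",
            PLUGIN_CGROUPS_NAME, PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
            systemd_cgroup_chart_priority + 10, update_every, RRDSET_TYPE_LINE);
        rrddim_add(cg->st_mem_failcnt, "fail", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
    }
    rrddim_set(cg->st_mem_failcnt, "fail", cg->memory.failcnt);
    rrdset_done(cg->st_mem_failcnt);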
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index 9c7488c82..3bb8e7d3e 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -38,6 +38,7 @@
// cgroup globals
static char cgroup_chart_id_prefix[] = "cgroup_";
+static char services_chart_id_prefix[] = "systemd_";
static int is_inside_k8s = 0;
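With per-service charts, service chart type ids get their own prefix, parallel to the existing "cgroup_" one. Illustrative only - the names are hypothetical, and dots in chart ids are substituted (see substitute_dots_in_id further down):

    // container cgroup: "cgroup_"  + chart_id  -> "cgroup_mycontainer"
    // systemd service:  "systemd_" + chart_id  -> "systemd_nginx_service"
    char type[RRD_ID_LENGTH_MAX + 1];
    snprintfz(type, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id);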
@@ -796,17 +797,20 @@ struct cgroup {
char enabled; // enabled in the config
char pending_renames;
- char *intermediate_id; // TODO: remove it when the renaming script is fixed
char *id;
uint32_t hash;
+ char *intermediate_id; // TODO: remove it when the renaming script is fixed
+
char *chart_id;
- uint32_t hash_chart;
+ uint32_t hash_chart_id;
- char *chart_title;
+ // 'cgroup_name' label value.
+ // By default this is the *id (path), later changed to the resolved name (cgroup-name.sh) or systemd service name.
+ char *name;
- DICTIONARY *chart_labels;
+ RRDLABELS *chart_labels;
int container_orchestrator;
@@ -878,35 +882,6 @@ struct cgroup {
unsigned long long memoryswap_limit;
const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit;
- // services
- RRDDIM *rd_cpu;
- RRDDIM *rd_mem_usage;
- RRDDIM *rd_mem_failcnt;
- RRDDIM *rd_swap_usage;
-
- RRDDIM *rd_mem_detailed_cache;
- RRDDIM *rd_mem_detailed_rss;
- RRDDIM *rd_mem_detailed_mapped;
- RRDDIM *rd_mem_detailed_writeback;
- RRDDIM *rd_mem_detailed_pgpgin;
- RRDDIM *rd_mem_detailed_pgpgout;
- RRDDIM *rd_mem_detailed_pgfault;
- RRDDIM *rd_mem_detailed_pgmajfault;
-
- RRDDIM *rd_io_service_bytes_read;
- RRDDIM *rd_io_serviced_read;
- RRDDIM *rd_throttle_io_read;
- RRDDIM *rd_throttle_io_serviced_read;
- RRDDIM *rd_io_queued_read;
- RRDDIM *rd_io_merged_read;
-
- RRDDIM *rd_io_service_bytes_write;
- RRDDIM *rd_io_serviced_write;
- RRDDIM *rd_throttle_io_write;
- RRDDIM *rd_throttle_io_serviced_write;
- RRDDIM *rd_io_queued_write;
- RRDDIM *rd_io_merged_write;
-
struct cgroup *next;
struct cgroup *discovered_next;
@@ -1667,7 +1642,7 @@ static inline void read_all_discovered_cgroups(struct cgroup *root) {
#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+ netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
pid_t cgroup_pid;
char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
@@ -1747,17 +1722,6 @@ static inline void free_cgroup_network_interfaces(struct cgroup *cg) {
#define CGROUP_CHARTID_LINE_MAX 1024
-static inline char *cgroup_title_strdupz(const char *s) {
- if(!s || !*s) s = "/";
-
- if(*s == '/' && s[1] != '\0') s++;
-
- char *r = strdupz(s);
- netdata_fix_chart_name(r);
-
- return r;
-}
-
static inline char *cgroup_chart_id_strdupz(const char *s) {
if(!s || !*s) s = "/";
@@ -1781,14 +1745,14 @@ static inline void substitute_dots_in_id(char *s) {
// ----------------------------------------------------------------------------
// parse k8s labels
-char *cgroup_parse_resolved_name_and_labels(DICTIONARY *labels, char *data) {
+char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data) {
// the first word, up to the first space is the name
char *name = strsep_skip_consecutive_separators(&data, " ");
// the rest are key=value pairs separated by comma
while(data) {
char *pair = strsep_skip_consecutive_separators(&data, ",");
- rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO| RRDLABEL_SRC_K8S);
+ rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S);
}
return name;
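For reference, the input parsed here is the single line printed by the rename script (cgroup-name.sh): the resolved name, a space, then comma-separated key=value label pairs. A sketch with hypothetical values:

    char data[] = "mypod k8s_namespace=default,k8s_pod_name=mypod";
    RRDLABELS *labels = rrdlabels_create();
    char *name = cgroup_parse_resolved_name_and_labels(labels, data);
    // name -> "mypod"; labels now hold k8s_namespace and k8s_pod_name,
    // both marked RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S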
@@ -1866,7 +1830,7 @@ static inline void cgroup_free(struct cgroup *cg) {
freez(cg->id);
freez(cg->intermediate_id);
freez(cg->chart_id);
- freez(cg->chart_title);
+ freez(cg->name);
rrdlabels_destroy(cg->chart_labels);
@@ -1883,7 +1847,7 @@ static inline void discovery_rename_cgroup(struct cgroup *cg) {
}
cg->pending_renames--;
- netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s' and title '%s'", cg->id, cg->chart_id, cg->chart_title);
+ netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id);
pid_t cgroup_pid;
@@ -1927,14 +1891,14 @@ static inline void discovery_rename_cgroup(struct cgroup *cg) {
name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name);
rrdlabels_remove_all_unmarked(cg->chart_labels);
- freez(cg->chart_title);
- cg->chart_title = cgroup_title_strdupz(name);
+ freez(cg->name);
+ cg->name = strdupz(name);
freez(cg->chart_id);
cg->chart_id = cgroup_chart_id_strdupz(name);
substitute_dots_in_id(cg->chart_id);
- cg->hash_chart = simple_hash(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
}
static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) {
@@ -1992,21 +1956,31 @@ static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) {
s[len] = '\0';
}
- freez(cg->chart_title);
- cg->chart_title = cgroup_title_strdupz(s);
+ freez(cg->name);
+ cg->name = strdupz(s);
+
+ freez(cg->chart_id);
+ cg->chart_id = cgroup_chart_id_strdupz(s);
+ substitute_dots_in_id(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
}
static inline struct cgroup *discovery_cgroup_add(const char *id) {
netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
struct cgroup *cg = callocz(1, sizeof(struct cgroup));
+
cg->id = strdupz(id);
cg->hash = simple_hash(cg->id);
- cg->chart_title = cgroup_title_strdupz(id);
+
+ cg->name = strdupz(id);
+
cg->intermediate_id = cgroup_chart_id_strdupz(id);
+
cg->chart_id = cgroup_chart_id_strdupz(id);
substitute_dots_in_id(cg->chart_id);
- cg->hash_chart = simple_hash(cg->chart_id);
+ cg->hash_chart_id = simple_hash(cg->chart_id);
+
if (cgroup_use_unified_cgroups) {
cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
}
@@ -2500,8 +2474,10 @@ static inline void discovery_cleanup_all_cgroups() {
// enable the first duplicate cgroup
{
struct cgroup *t;
- for(t = discovered_cgroup_root; t ; t = t->discovered_next) {
- if(t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE && t->hash_chart == cg->hash_chart && !strcmp(t->chart_id, cg->chart_id)) {
+ for (t = discovered_cgroup_root; t; t = t->discovered_next) {
+ if (t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE &&
+ (is_cgroup_systemd_service(t) == is_cgroup_systemd_service(cg)) &&
+ t->hash_chart_id == cg->hash_chart_id && !strcmp(t->chart_id, cg->chart_id)) {
netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
t->enabled = 1;
t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
@@ -2553,8 +2529,8 @@ static inline void discovery_share_cgroups_with_ebpf() {
for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) {
netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count];
- char *prefix = (is_cgroup_systemd_service(cg)) ? "" : "cgroup_";
- snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_title);
+ char *prefix = (is_cgroup_systemd_service(cg)) ? services_chart_id_prefix : cgroup_chart_id_prefix;
+ snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_id);
ptr->hash = simple_hash(ptr->name);
ptr->options = cg->options;
ptr->enabled = cg->enabled;
@@ -2658,13 +2634,13 @@ static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) {
}
if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_title);
+ netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_id);
convert_cgroup_to_systemd_service(cg);
return;
}
if (matches_enabled_cgroup_renames(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_title);
+ netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_id);
if (is_inside_k8s && k8s_is_container(cg->id)) {
// it may take up to a minute for the K8s API to return data for the container
// tested on AWS K8s cluster with 100% CPU utilization
@@ -2676,15 +2652,20 @@ static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) {
}
static int discovery_is_cgroup_duplicate(struct cgroup *cg) {
- // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
- struct cgroup *c;
- for (c = discovered_cgroup_root; c; c = c->discovered_next) {
- if (c != cg && c->enabled && c->hash_chart == cg->hash_chart && !strcmp(c->chart_id, cg->chart_id)) {
- collector_error("CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.", cg->chart_id, c->id, cg->id);
- return 1;
- }
- }
- return 0;
+ // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
+ struct cgroup *c;
+ for (c = discovered_cgroup_root; c; c = c->discovered_next) {
+ if (c != cg && c->enabled && (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg)) &&
+ c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id)) {
+ collector_error(
+ "CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
+ cg->chart_id,
+ c->id,
+ cg->id);
+ return 1;
+ }
+ }
+ return 0;
}
static inline void discovery_process_cgroup(struct cgroup *cg) {
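The added is_cgroup_systemd_service() comparison means a service and a container cgroup that happen to resolve to the same chart id no longer disable each other as duplicates - they now live in separate chart type namespaces ("systemd_" vs "cgroup_"). A hypothetical collision:

    // a container named "foo_service" and a "foo.service" unit can both yield
    // chart_id "foo_service", but they map to "cgroup_foo_service" and
    // "systemd_foo_service" respectively, so only same-kind matches count:
    if (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg) &&
        c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id))
        { /* same kind, same chart id: a real duplicate */ }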
@@ -2720,17 +2701,25 @@ static inline void discovery_process_cgroup(struct cgroup *cg) {
}
if (is_cgroup_systemd_service(cg)) {
+ if (discovery_is_cgroup_duplicate(cg)) {
+ cg->enabled = 0;
+ cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
+ return;
+ }
+ if (!cg->chart_labels)
+ cg->chart_labels = rrdlabels_create();
+ rrdlabels_add(cg->chart_labels, "service_name", cg->name, RRDLABEL_SRC_AUTO);
cg->enabled = 1;
return;
}
- if (!(cg->enabled = matches_enabled_cgroup_names(cg->chart_title))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->chart_title);
+ if (!(cg->enabled = matches_enabled_cgroup_names(cg->name))) {
+ netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->name);
return;
}
if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->chart_title);
+ netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->name);
return;
}
@@ -2744,10 +2733,9 @@ static inline void discovery_process_cgroup(struct cgroup *cg) {
cg->chart_labels = rrdlabels_create();
if (!k8s_is_kubepod(cg)) {
- rrdlabels_add(cg->chart_labels, "cgroup_name", cg->chart_id, RRDLABEL_SRC_AUTO);
- if (!dictionary_get(cg->chart_labels, "image")) {
+ rrdlabels_add(cg->chart_labels, "cgroup_name", cg->name, RRDLABEL_SRC_AUTO);
+ if (!rrdlabels_exist(cg->chart_labels, "image"))
rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO);
- }
}
worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK);
@@ -2801,6 +2789,19 @@ static void cgroup_discovery_cleanup(void *ptr) {
service_exits();
}
+static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
+ if(buffer[0]) return buffer;
+
+ if (cg->chart_id[0] == '\0' || (cg->chart_id[0] == '/' && cg->chart_id[1] == '\0'))
+ strncpy(buffer, "cgroup_root", RRD_ID_LENGTH_MAX);
+ else if (is_cgroup_systemd_service(cg))
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id);
+ else
+ snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", cgroup_chart_id_prefix, cg->chart_id);
+
+ return buffer;
+}
+
void cgroup_discovery_worker(void *ptr)
{
UNUSED(ptr);
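The reworked cgroup_chart_type() derives the chart type from the cgroup itself and memoizes the result: the if(buffer[0]) guard means callers reset the buffer once per cgroup and the string is built at most once, however many charts follow. Usage sketch matching the update loops below:

    char type[RRD_ID_LENGTH_MAX + 1];
    for (cg = cgroup_root; cg; cg = cg->next) {
        type[0] = '\0';  // reset once per cgroup
        // the first call fills the buffer ("cgroup_<chart_id>", "systemd_<chart_id>",
        // or "cgroup_root"); later calls for the same cgroup return it unchanged
        const char *chart_type = cgroup_chart_type(type, cg);
        (void) chart_type;
    }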
@@ -2850,709 +2851,376 @@ void cgroup_discovery_worker(void *ptr)
#define CHART_TITLE_MAX 300
void update_systemd_services_charts(
- int update_every
- , int do_cpu
- , int do_mem_usage
- , int do_mem_detailed
- , int do_mem_failcnt
- , int do_swap_usage
- , int do_io
- , int do_io_ops
- , int do_throttle_io
- , int do_throttle_ops
- , int do_queued_ops
- , int do_merged_ops
-) {
- static RRDSET
- *st_cpu = NULL,
- *st_mem_usage = NULL,
- *st_mem_failcnt = NULL,
- *st_swap_usage = NULL,
-
- *st_mem_detailed_cache = NULL,
- *st_mem_detailed_rss = NULL,
- *st_mem_detailed_mapped = NULL,
- *st_mem_detailed_writeback = NULL,
- *st_mem_detailed_pgfault = NULL,
- *st_mem_detailed_pgmajfault = NULL,
- *st_mem_detailed_pgpgin = NULL,
- *st_mem_detailed_pgpgout = NULL,
-
- *st_io_read = NULL,
- *st_io_serviced_read = NULL,
- *st_throttle_io_read = NULL,
- *st_throttle_ops_read = NULL,
- *st_queued_ops_read = NULL,
- *st_merged_ops_read = NULL,
-
- *st_io_write = NULL,
- *st_io_serviced_write = NULL,
- *st_throttle_io_write = NULL,
- *st_throttle_ops_write = NULL,
- *st_queued_ops_write = NULL,
- *st_merged_ops_write = NULL;
-
- // create the charts
-
- if (unlikely(do_cpu && !st_cpu)) {
- char title[CHART_TITLE_MAX + 1];
- snprintfz(title, CHART_TITLE_MAX, "Systemd Services CPU utilization (100%% = 1 core)");
-
- st_cpu = rrdset_create_localhost(
- "services"
- , "cpu"
- , NULL
- , "cpu"
- , "services.cpu"
- , title
- , "percentage"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if (unlikely(do_mem_usage && !st_mem_usage)) {
- st_mem_usage = rrdset_create_localhost(
- "services"
- , "mem_usage"
- , NULL
- , "mem"
- , "services.mem_usage"
- , "Systemd Services Used Memory"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(likely(do_mem_detailed)) {
- if(unlikely(!st_mem_detailed_rss)) {
- st_mem_detailed_rss = rrdset_create_localhost(
- "services"
- , "mem_rss"
- , NULL
- , "mem"
- , "services.mem_rss"
- , "Systemd Services RSS Memory"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_mem_detailed_mapped)) {
- st_mem_detailed_mapped = rrdset_create_localhost(
- "services"
- , "mem_mapped"
- , NULL
- , "mem"
- , "services.mem_mapped"
- , "Systemd Services Mapped Memory"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_mem_detailed_cache)) {
- st_mem_detailed_cache = rrdset_create_localhost(
- "services"
- , "mem_cache"
- , NULL
- , "mem"
- , "services.mem_cache"
- , "Systemd Services Cache Memory"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_mem_detailed_writeback)) {
- st_mem_detailed_writeback = rrdset_create_localhost(
- "services"
- , "mem_writeback"
- , NULL
- , "mem"
- , "services.mem_writeback"
- , "Systemd Services Writeback Memory"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- }
-
- if(unlikely(!st_mem_detailed_pgfault)) {
- st_mem_detailed_pgfault = rrdset_create_localhost(
- "services"
- , "mem_pgfault"
- , NULL
- , "mem"
- , "services.mem_pgfault"
- , "Systemd Services Memory Minor Page Faults"
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_mem_detailed_pgmajfault)) {
- st_mem_detailed_pgmajfault = rrdset_create_localhost(
- "services"
- , "mem_pgmajfault"
- , NULL
- , "mem"
- , "services.mem_pgmajfault"
- , "Systemd Services Memory Major Page Faults"
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_mem_detailed_pgpgin)) {
- st_mem_detailed_pgpgin = rrdset_create_localhost(
- "services"
- , "mem_pgpgin"
- , NULL
- , "mem"
- , "services.mem_pgpgin"
- , "Systemd Services Memory Charging Activity"
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 80
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- }
-
- if(unlikely(!st_mem_detailed_pgpgout)) {
- st_mem_detailed_pgpgout = rrdset_create_localhost(
- "services"
- , "mem_pgpgout"
- , NULL
- , "mem"
- , "services.mem_pgpgout"
- , "Systemd Services Memory Uncharging Activity"
- , "MiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 90
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(unlikely(do_mem_failcnt && !st_mem_failcnt)) {
- st_mem_failcnt = rrdset_create_localhost(
- "services"
- , "mem_failcnt"
- , NULL
- , "mem"
- , "services.mem_failcnt"
- , "Systemd Services Memory Limit Failures"
- , "failures"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 110
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if (do_swap_usage && !st_swap_usage) {
- st_swap_usage = rrdset_create_localhost(
- "services"
- , "swap_usage"
- , NULL
- , "swap"
- , "services.swap_usage"
- , "Systemd Services Swap Memory Used"
- , "MiB"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 100
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(likely(do_io)) {
- if(unlikely(!st_io_read)) {
- st_io_read = rrdset_create_localhost(
- "services"
- , "io_read"
- , NULL
- , "disk"
- , "services.io_read"
- , "Systemd Services Disk Read Bandwidth"
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 120
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_io_write)) {
- st_io_write = rrdset_create_localhost(
- "services"
- , "io_write"
- , NULL
- , "disk"
- , "services.io_write"
- , "Systemd Services Disk Write Bandwidth"
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 130
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(likely(do_io_ops)) {
- if(unlikely(!st_io_serviced_read)) {
- st_io_serviced_read = rrdset_create_localhost(
- "services"
- , "io_ops_read"
- , NULL
- , "disk"
- , "services.io_ops_read"
- , "Systemd Services Disk Read Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 140
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_io_serviced_write)) {
- st_io_serviced_write = rrdset_create_localhost(
- "services"
- , "io_ops_write"
- , NULL
- , "disk"
- , "services.io_ops_write"
- , "Systemd Services Disk Write Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 150
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(likely(do_throttle_io)) {
- if(unlikely(!st_throttle_io_read)) {
-
- st_throttle_io_read = rrdset_create_localhost(
- "services"
- , "throttle_io_read"
- , NULL
- , "disk"
- , "services.throttle_io_read"
- , "Systemd Services Throttle Disk Read Bandwidth"
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 160
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- }
-
- if(unlikely(!st_throttle_io_write)) {
- st_throttle_io_write = rrdset_create_localhost(
- "services"
- , "throttle_io_write"
- , NULL
- , "disk"
- , "services.throttle_io_write"
- , "Systemd Services Throttle Disk Write Bandwidth"
- , "KiB/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 170
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(likely(do_throttle_ops)) {
- if(unlikely(!st_throttle_ops_read)) {
- st_throttle_ops_read = rrdset_create_localhost(
- "services"
- , "throttle_io_ops_read"
- , NULL
- , "disk"
- , "services.throttle_io_ops_read"
- , "Systemd Services Throttle Disk Read Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 180
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_throttle_ops_write)) {
- st_throttle_ops_write = rrdset_create_localhost(
- "services"
- , "throttle_io_ops_write"
- , NULL
- , "disk"
- , "services.throttle_io_ops_write"
- , "Systemd Services Throttle Disk Write Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 190
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(likely(do_queued_ops)) {
- if(unlikely(!st_queued_ops_read)) {
- st_queued_ops_read = rrdset_create_localhost(
- "services"
- , "queued_io_ops_read"
- , NULL
- , "disk"
- , "services.queued_io_ops_read"
- , "Systemd Services Queued Disk Read Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 200
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_queued_ops_write)) {
-
- st_queued_ops_write = rrdset_create_localhost(
- "services"
- , "queued_io_ops_write"
- , NULL
- , "disk"
- , "services.queued_io_ops_write"
- , "Systemd Services Queued Disk Write Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 210
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
- if(likely(do_merged_ops)) {
- if(unlikely(!st_merged_ops_read)) {
- st_merged_ops_read = rrdset_create_localhost(
- "services"
- , "merged_io_ops_read"
- , NULL
- , "disk"
- , "services.merged_io_ops_read"
- , "Systemd Services Merged Disk Read Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 220
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- if(unlikely(!st_merged_ops_write)) {
- st_merged_ops_write = rrdset_create_localhost(
- "services"
- , "merged_io_ops_write"
- , NULL
- , "disk"
- , "services.merged_io_ops_write"
- , "Systemd Services Merged Disk Write Operations"
- , "operations/s"
- , PLUGIN_CGROUPS_NAME
- , PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME
- , NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 230
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
- }
-
+ int update_every,
+ int do_cpu,
+ int do_mem_usage,
+ int do_mem_detailed,
+ int do_mem_failcnt,
+ int do_swap_usage,
+ int do_io,
+ int do_io_ops,
+ int do_throttle_io,
+ int do_throttle_ops,
+ int do_queued_ops,
+ int do_merged_ops)
+{
// update the values
struct cgroup *cg;
- for(cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg)))
- continue;
+ int systemd_cgroup_chart_priority = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD;
+ char type[RRD_ID_LENGTH_MAX + 1];
- if(likely(do_cpu && cg->cpuacct_stat.updated)) {
- if(unlikely(!cg->rd_cpu)){
+ for (cg = cgroup_root; cg; cg = cg->next) {
+ if (unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg)))
+ continue;
+ type[0] = '\0';
+ if (likely(do_cpu && cg->cpuacct_stat.updated)) {
+ if (unlikely(!cg->st_cpu)) {
+ cg->st_cpu = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "cpu_utilization",
+ NULL,
+ "cpu",
+ "systemd.service.cpu.utilization",
+ "Systemd Services CPU utilization (100%% = 1 core)",
+ "percentage",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority,
+ update_every,
+ RRDSET_TYPE_STACKED);
+ rrdset_update_rrdlabels(cg->st_cpu, cg->chart_labels);
if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
} else {
- cg->rd_cpu = rrddim_add(st_cpu, cg->chart_id, cg->chart_title, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_cpu, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
}
}
- rrddim_set_by_pointer(st_cpu, cg->rd_cpu, cg->cpuacct_stat.user + cg->cpuacct_stat.system);
- }
-
- if(likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
- if(unlikely(!cg->rd_mem_usage))
- cg->rd_mem_usage = rrddim_add(st_mem_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(st_mem_usage, cg->rd_mem_usage, cg->memory.usage_in_bytes);
+ // set the values and finish this service's CPU chart
+ rrddim_set(cg->st_cpu, "user", cg->cpuacct_stat.user);
+ rrddim_set(cg->st_cpu, "system", cg->cpuacct_stat.system);
+ rrdset_done(cg->st_cpu);
}
- if(likely(do_mem_detailed && cg->memory.updated_detailed)) {
- if(unlikely(!cg->rd_mem_detailed_rss))
- cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.total_rss);
+ if (likely(do_mem_usage && cg->memory.updated_usage_in_bytes)) {
+ if (unlikely(!cg->st_mem_usage)) {
+ cg->st_mem_usage = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_usage",
+ NULL,
+ "mem",
+ "systemd.service.memory.usage",
+ "Systemd Services Used Memory",
+ "MiB",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 5,
+ update_every,
+ RRDSET_TYPE_STACKED);
- if(unlikely(!cg->rd_mem_detailed_mapped))
- cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrdset_update_rrdlabels(cg->st_mem_usage, cg->chart_labels);
+ rrddim_add(cg->st_mem_usage, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(do_swap_usage))
+ rrddim_add(cg->st_mem_usage, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
- rrddim_set_by_pointer(st_mem_detailed_mapped, cg->rd_mem_detailed_mapped, cg->memory.total_mapped_file);
+ rrddim_set(cg->st_mem_usage, "ram", cg->memory.usage_in_bytes);
+ if (likely(do_swap_usage)) {
+ if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
+ rrddim_set(
+ cg->st_mem_usage,
+ "swap",
+ cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
+ cg->memory.msw_usage_in_bytes -
+ (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) :
+ 0);
+ } else {
+ rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes);
+ }
+ }
+ rrdset_done(cg->st_mem_usage);
+ }
- if(unlikely(!cg->rd_mem_detailed_cache))
- cg->rd_mem_detailed_cache = rrddim_add(st_mem_detailed_cache, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
+ if (unlikely(!cg->st_mem_failcnt)) {
+ cg->st_mem_failcnt = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_failcnt",
+ NULL,
+ "mem",
+ "systemd.service.memory.failcnt",
+ "Systemd Services Memory Limit Failures",
+ "failures/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 10,
+ update_every,
+ RRDSET_TYPE_LINE);
- rrddim_set_by_pointer(st_mem_detailed_cache, cg->rd_mem_detailed_cache, cg->memory.total_cache);
+ rrdset_update_rrdlabels(cg->st_mem_failcnt, cg->chart_labels);
+ rrddim_add(cg->st_mem_failcnt, "fail", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
- if(unlikely(!cg->rd_mem_detailed_writeback))
- cg->rd_mem_detailed_writeback = rrddim_add(st_mem_detailed_writeback, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_set(cg->st_mem_failcnt, "fail", cg->memory.failcnt);
+ rrdset_done(cg->st_mem_failcnt);
+ }
- rrddim_set_by_pointer(st_mem_detailed_writeback, cg->rd_mem_detailed_writeback, cg->memory.total_writeback);
+ if (likely(do_mem_detailed && cg->memory.updated_detailed)) {
+ if (unlikely(!cg->st_mem)) {
+ cg->st_mem = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_ram_usage",
+ NULL,
+ "mem",
+ "systemd.service.memory.ram.usage",
+ "Systemd Services Memory",
+ "MiB",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 15,
+ update_every,
+ RRDSET_TYPE_STACKED);
- if(unlikely(!cg->rd_mem_detailed_pgfault))
- cg->rd_mem_detailed_pgfault = rrddim_add(st_mem_detailed_pgfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_update_rrdlabels(cg->st_mem, cg->chart_labels);
+ rrddim_add(cg->st_mem, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_mem, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
- rrddim_set_by_pointer(st_mem_detailed_pgfault, cg->rd_mem_detailed_pgfault, cg->memory.total_pgfault);
+ rrddim_set(cg->st_mem, "rss", cg->memory.total_rss);
+ rrddim_set(cg->st_mem, "cache", cg->memory.total_cache);
+ rrddim_set(cg->st_mem, "mapped_file", cg->memory.total_mapped_file);
+ rrddim_set(cg->st_mem, "rss_huge", cg->memory.total_rss_huge);
+ rrdset_done(cg->st_mem);
- if(unlikely(!cg->rd_mem_detailed_pgmajfault))
- cg->rd_mem_detailed_pgmajfault = rrddim_add(st_mem_detailed_pgmajfault, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ if (unlikely(!cg->st_writeback)) {
+ cg->st_writeback = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_writeback",
+ NULL,
+ "mem",
+ "systemd.service.memory.writeback",
+ "Systemd Services Writeback Memory",
+ "MiB",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 20,
+ update_every,
+ RRDSET_TYPE_STACKED);
- rrddim_set_by_pointer(st_mem_detailed_pgmajfault, cg->rd_mem_detailed_pgmajfault, cg->memory.total_pgmajfault);
+ rrdset_update_rrdlabels(cg->st_writeback, cg->chart_labels);
+ rrddim_add(cg->st_writeback, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(cg->st_writeback, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
- if(unlikely(!cg->rd_mem_detailed_pgpgin))
- cg->rd_mem_detailed_pgpgin = rrddim_add(st_mem_detailed_pgpgin, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_set(cg->st_writeback, "writeback", cg->memory.total_writeback);
+ rrddim_set(cg->st_writeback, "dirty", cg->memory.total_dirty);
+ rrdset_done(cg->st_writeback);
- rrddim_set_by_pointer(st_mem_detailed_pgpgin, cg->rd_mem_detailed_pgpgin, cg->memory.total_pgpgin);
+ if (unlikely(!cg->st_pgfaults)) {
+ cg->st_pgfaults = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_pgfault",
+ NULL,
+ "mem",
+ "systemd.service.memory.paging.faults",
+ "Systemd Services Memory Minor and Major Page Faults",
+ "MiB/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 25,
+ update_every,
+ RRDSET_TYPE_AREA);
- if(unlikely(!cg->rd_mem_detailed_pgpgout))
- cg->rd_mem_detailed_pgpgout = rrddim_add(st_mem_detailed_pgpgout, cg->chart_id, cg->chart_title, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrdset_update_rrdlabels(cg->st_pgfaults, cg->chart_labels);
+ rrddim_add(cg->st_pgfaults, "minor", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_pgfaults, "major", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
- rrddim_set_by_pointer(st_mem_detailed_pgpgout, cg->rd_mem_detailed_pgpgout, cg->memory.total_pgpgout);
- }
+ rrddim_set(cg->st_pgfaults, "minor", cg->memory.total_pgfault);
+ rrddim_set(cg->st_pgfaults, "major", cg->memory.total_pgmajfault);
+ rrdset_done(cg->st_pgfaults);
- if(likely(do_mem_failcnt && cg->memory.updated_failcnt)) {
- if(unlikely(!cg->rd_mem_failcnt))
- cg->rd_mem_failcnt = rrddim_add(st_mem_failcnt, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (unlikely(!cg->st_mem_activity)) {
+ cg->st_mem_activity = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "mem_paging_io",
+ NULL,
+ "mem",
+ "systemd.service.memory.paging.io",
+ "Systemd Services Memory Paging IO",
+ "MiB/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 30,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ rrdset_update_rrdlabels(cg->st_mem_activity, cg->chart_labels);
+ rrddim_add(cg->st_mem_activity, "in", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_mem_activity, "out", NULL, -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
- rrddim_set_by_pointer(st_mem_failcnt, cg->rd_mem_failcnt, cg->memory.failcnt);
+ rrddim_set(cg->st_mem_activity, "in", cg->memory.total_pgpgin);
+ rrddim_set(cg->st_mem_activity, "out", cg->memory.total_pgpgout);
+ rrdset_done(cg->st_mem_activity);
}
- if(likely(do_swap_usage && cg->memory.updated_msw_usage_in_bytes)) {
- if(unlikely(!cg->rd_swap_usage))
- cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+ if (likely(do_io && cg->io_service_bytes.updated)) {
+ if (unlikely(!cg->st_io)) {
+ cg->st_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_io",
+ NULL,
+ "disk",
+ "systemd.service.disk.io",
+ "Systemd Services Disk Read/Write Bandwidth",
+ "KiB/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 35,
+ update_every,
+ RRDSET_TYPE_AREA);
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set_by_pointer(
- st_swap_usage,
- cg->rd_swap_usage,
- cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
- cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0);
- } else {
- rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes);
+ rrdset_update_rrdlabels(cg->st_io, cg->chart_labels);
+ rrddim_add(cg->st_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
}
+ rrddim_set(cg->st_io, "read", cg->io_service_bytes.Read);
+ rrddim_set(cg->st_io, "write", cg->io_service_bytes.Write);
+ rrdset_done(cg->st_io);
}
- if(likely(do_io && cg->io_service_bytes.updated)) {
- if(unlikely(!cg->rd_io_service_bytes_read))
- cg->rd_io_service_bytes_read = rrddim_add(st_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_io_read, cg->rd_io_service_bytes_read, cg->io_service_bytes.Read);
-
- if(unlikely(!cg->rd_io_service_bytes_write))
- cg->rd_io_service_bytes_write = rrddim_add(st_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_io_write, cg->rd_io_service_bytes_write, cg->io_service_bytes.Write);
- }
-
- if(likely(do_io_ops && cg->io_serviced.updated)) {
- if(unlikely(!cg->rd_io_serviced_read))
- cg->rd_io_serviced_read = rrddim_add(st_io_serviced_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_io_serviced_read, cg->rd_io_serviced_read, cg->io_serviced.Read);
-
- if(unlikely(!cg->rd_io_serviced_write))
- cg->rd_io_serviced_write = rrddim_add(st_io_serviced_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (likely(do_io_ops && cg->io_serviced.updated)) {
+ if (unlikely(!cg->st_serviced_ops)) {
+ cg->st_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_iops",
+ NULL,
+ "disk",
+ "systemd.service.disk.iops",
+ "Systemd Services Disk Read/Write Operations",
+ "operations/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 40,
+ update_every,
+ RRDSET_TYPE_LINE);
- rrddim_set_by_pointer(st_io_serviced_write, cg->rd_io_serviced_write, cg->io_serviced.Write);
+ rrdset_update_rrdlabels(cg->st_serviced_ops, cg->chart_labels);
+ rrddim_add(cg->st_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ rrddim_set(cg->st_serviced_ops, "read", cg->io_serviced.Read);
+ rrddim_set(cg->st_serviced_ops, "write", cg->io_serviced.Write);
+ rrdset_done(cg->st_serviced_ops);
}
- if(likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
- if(unlikely(!cg->rd_throttle_io_read))
- cg->rd_throttle_io_read = rrddim_add(st_throttle_io_read, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_throttle_io_read, cg->rd_throttle_io_read, cg->throttle_io_service_bytes.Read);
-
- if(unlikely(!cg->rd_throttle_io_write))
- cg->rd_throttle_io_write = rrddim_add(st_throttle_io_write, cg->chart_id, cg->chart_title, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ if (likely(do_throttle_io && cg->throttle_io_service_bytes.updated)) {
+ if (unlikely(!cg->st_throttle_io)) {
+ cg->st_throttle_io = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_throttle_io",
+ NULL,
+ "disk",
+ "systemd.service.disk.throttle.io",
+ "Systemd Services Throttle Disk Read/Write Bandwidth",
+ "KiB/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 45,
+ update_every,
+ RRDSET_TYPE_AREA);
- rrddim_set_by_pointer(st_throttle_io_write, cg->rd_throttle_io_write, cg->throttle_io_service_bytes.Write);
+ rrdset_update_rrdlabels(cg->st_throttle_io, cg->chart_labels);
+ rrddim_add(cg->st_throttle_io, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+ }
+ rrddim_set(cg->st_throttle_io, "read", cg->throttle_io_service_bytes.Read);
+ rrddim_set(cg->st_throttle_io, "write", cg->throttle_io_service_bytes.Write);
+ rrdset_done(cg->st_throttle_io);
}
- if(likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
- if(unlikely(!cg->rd_throttle_io_serviced_read))
- cg->rd_throttle_io_serviced_read = rrddim_add(st_throttle_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_throttle_ops_read, cg->rd_throttle_io_serviced_read, cg->throttle_io_serviced.Read);
-
- if(unlikely(!cg->rd_throttle_io_serviced_write))
- cg->rd_throttle_io_serviced_write = rrddim_add(st_throttle_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (likely(do_throttle_ops && cg->throttle_io_serviced.updated)) {
+ if (unlikely(!cg->st_throttle_serviced_ops)) {
+ cg->st_throttle_serviced_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_throttle_iops",
+ NULL,
+ "disk",
+ "systemd.service.disk.throttle.iops",
+ "Systemd Services Throttle Disk Read/Write Operations",
+ "operations/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 50,
+ update_every,
+ RRDSET_TYPE_LINE);
- rrddim_set_by_pointer(st_throttle_ops_write, cg->rd_throttle_io_serviced_write, cg->throttle_io_serviced.Write);
+ rrdset_update_rrdlabels(cg->st_throttle_serviced_ops, cg->chart_labels);
+ rrddim_add(cg->st_throttle_serviced_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_throttle_serviced_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ rrddim_set(cg->st_throttle_serviced_ops, "read", cg->throttle_io_serviced.Read);
+ rrddim_set(cg->st_throttle_serviced_ops, "write", cg->throttle_io_serviced.Write);
+ rrdset_done(cg->st_throttle_serviced_ops);
}
- if(likely(do_queued_ops && cg->io_queued.updated)) {
- if(unlikely(!cg->rd_io_queued_read))
- cg->rd_io_queued_read = rrddim_add(st_queued_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_queued_ops_read, cg->rd_io_queued_read, cg->io_queued.Read);
-
- if(unlikely(!cg->rd_io_queued_write))
- cg->rd_io_queued_write = rrddim_add(st_queued_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (likely(do_queued_ops && cg->io_queued.updated)) {
+ if (unlikely(!cg->st_queued_ops)) {
+ cg->st_queued_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_queued_iops",
+ NULL,
+ "disk",
+ "systemd.service.disk.queued_iops",
+ "Systemd Services Queued Disk Read/Write Operations",
+ "operations/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 55,
+ update_every,
+ RRDSET_TYPE_LINE);
- rrddim_set_by_pointer(st_queued_ops_write, cg->rd_io_queued_write, cg->io_queued.Write);
+ rrdset_update_rrdlabels(cg->st_queued_ops, cg->chart_labels);
+ rrddim_add(cg->st_queued_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_queued_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ rrddim_set(cg->st_queued_ops, "read", cg->io_queued.Read);
+ rrddim_set(cg->st_queued_ops, "write", cg->io_queued.Write);
+ rrdset_done(cg->st_queued_ops);
}
- if(likely(do_merged_ops && cg->io_merged.updated)) {
- if(unlikely(!cg->rd_io_merged_read))
- cg->rd_io_merged_read = rrddim_add(st_merged_ops_read, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_merged_ops_read, cg->rd_io_merged_read, cg->io_merged.Read);
-
- if(unlikely(!cg->rd_io_merged_write))
- cg->rd_io_merged_write = rrddim_add(st_merged_ops_write, cg->chart_id, cg->chart_title, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ if (likely(do_merged_ops && cg->io_merged.updated)) {
+ if (unlikely(!cg->st_merged_ops)) {
+ cg->st_merged_ops = rrdset_create_localhost(
+ cgroup_chart_type(type, cg),
+ "disk_merged_iops",
+ NULL,
+ "disk",
+ "systemd.service.disk.merged_iops",
+ "Systemd Services Merged Disk Read/Write Operations",
+ "operations/s",
+ PLUGIN_CGROUPS_NAME,
+ PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME,
+ systemd_cgroup_chart_priority + 60,
+ update_every,
+ RRDSET_TYPE_LINE);
- rrddim_set_by_pointer(st_merged_ops_write, cg->rd_io_merged_write, cg->io_merged.Write);
+ rrdset_update_rrdlabels(cg->st_merged_ops, cg->chart_labels);
+ rrddim_add(cg->st_merged_ops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(cg->st_merged_ops, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+ rrddim_set(cg->st_merged_ops, "read", cg->io_merged.Read);
+ rrddim_set(cg->st_merged_ops, "write", cg->io_merged.Write);
+ rrdset_done(cg->st_merged_ops);
}
}
-
- // complete the iteration
- if(likely(do_cpu))
- rrdset_done(st_cpu);
-
- if(likely(do_mem_usage))
- rrdset_done(st_mem_usage);
-
- if(unlikely(do_mem_detailed)) {
- rrdset_done(st_mem_detailed_cache);
- rrdset_done(st_mem_detailed_rss);
- rrdset_done(st_mem_detailed_mapped);
- rrdset_done(st_mem_detailed_writeback);
- rrdset_done(st_mem_detailed_pgfault);
- rrdset_done(st_mem_detailed_pgmajfault);
- rrdset_done(st_mem_detailed_pgpgin);
- rrdset_done(st_mem_detailed_pgpgout);
- }
-
- if(likely(do_mem_failcnt))
- rrdset_done(st_mem_failcnt);
-
- if(likely(do_swap_usage))
- rrdset_done(st_swap_usage);
-
- if(likely(do_io)) {
- rrdset_done(st_io_read);
- rrdset_done(st_io_write);
- }
-
- if(likely(do_io_ops)) {
- rrdset_done(st_io_serviced_read);
- rrdset_done(st_io_serviced_write);
- }
-
- if(likely(do_throttle_io)) {
- rrdset_done(st_throttle_io_read);
- rrdset_done(st_throttle_io_write);
- }
-
- if(likely(do_throttle_ops)) {
- rrdset_done(st_throttle_ops_read);
- rrdset_done(st_throttle_ops_write);
- }
-
- if(likely(do_queued_ops)) {
- rrdset_done(st_queued_ops_read);
- rrdset_done(st_queued_ops_write);
- }
-
- if(likely(do_merged_ops)) {
- rrdset_done(st_merged_ops_read);
- rrdset_done(st_merged_ops_write);
- }
-}
-
-static inline char *cgroup_chart_type(char *buffer, const char *id, size_t len) {
- if(buffer[0]) return buffer;
-
- if(id[0] == '\0' || (id[0] == '/' && id[1] == '\0'))
- strncpy(buffer, "cgroup_root", len);
- else
- snprintfz(buffer, len, "%s%s", cgroup_chart_id_prefix, id);
-
- netdata_fix_chart_id(buffer);
- return buffer;
}
static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) {
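Taken together, the rewrite above replaces the two-phase shared-chart flow (one dimension per service added to static charts, plus a batch of rrdset_done() calls at the end) with a self-contained create/set/done sequence per service. Note the dimension multipliers in the new charts - positive for read/in, negative for write/out - so paired flows mirror around zero on the area charts; for example (calls taken from the diff above):

    rrddim_add(cg->st_io,           "read",  NULL,  1,                1024,        RRD_ALGORITHM_INCREMENTAL);
    rrddim_add(cg->st_io,           "write", NULL, -1,                1024,        RRD_ALGORITHM_INCREMENTAL);
    rrddim_add(cg->st_mem_activity, "in",    NULL,  system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
    rrddim_add(cg->st_mem_activity, "out",   NULL, -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);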
@@ -3719,7 +3387,7 @@ void update_cgroup_charts(int update_every) {
k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)");
cg->st_cpu = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu"
, NULL
, "cpu"
@@ -3788,7 +3456,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "CPU Usage within the limits");
cg->st_cpu_limit = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_limit"
, NULL
, "cpu"
@@ -3840,7 +3508,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Runnable Periods");
cg->st_cpu_nr_throttled = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "throttled"
, NULL
, "cpu"
@@ -3865,7 +3533,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "CPU Throttled Time Duration");
cg->st_cpu_throttled_time = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "throttled_duration"
, NULL
, "cpu"
@@ -3892,7 +3560,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "CPU Time Relative Share");
cg->st_cpu_shares = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_shares"
, NULL
, "cpu"
@@ -3926,7 +3594,7 @@ void update_cgroup_charts(int update_every) {
"CPU Usage (100%% = 1 core) Per Core");
cg->st_cpu_per_core = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_per_core"
, NULL
, "cpu"
@@ -3960,7 +3628,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory Usage");
cg->st_mem = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem"
, NULL
, "mem"
@@ -4018,7 +3686,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Writeback Memory");
cg->st_writeback = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "writeback"
, NULL
, "mem"
@@ -4051,7 +3719,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory Activity");
cg->st_mem_activity = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_activity"
, NULL
, "mem"
@@ -4080,7 +3748,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory Page Faults");
cg->st_pgfaults = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "pgfaults"
, NULL
, "mem"
@@ -4110,7 +3778,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Used Memory");
cg->st_mem_usage = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_usage"
, NULL
, "mem"
@@ -4175,7 +3843,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Used RAM within the limits");
cg->st_mem_usage_limit = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_usage_limit"
, NULL
, "mem"
@@ -4205,7 +3873,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory Utilization");
cg->st_mem_utilization = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_utilization"
, NULL
, "mem"
@@ -4253,7 +3921,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory Limit Failures");
cg->st_mem_failcnt = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_failcnt"
, NULL
, "mem"
@@ -4281,7 +3949,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "I/O Bandwidth (all disks)");
cg->st_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "io"
, NULL
, "disk"
@@ -4311,7 +3979,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Serviced I/O Operations (all disks)");
cg->st_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "serviced_ops"
, NULL
, "disk"
@@ -4341,7 +4009,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Throttle I/O Bandwidth (all disks)");
cg->st_throttle_io = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "throttle_io"
, NULL
, "disk"
@@ -4371,7 +4039,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Throttle Serviced I/O Operations (all disks)");
cg->st_throttle_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "throttle_serviced_ops"
, NULL
, "disk"
@@ -4401,7 +4069,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Queued I/O Operations (all disks)");
cg->st_queued_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "queued_ops"
, NULL
, "disk"
@@ -4431,7 +4099,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Merged I/O Operations (all disks)");
cg->st_merged_ops = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "merged_ops"
, NULL
, "disk"
@@ -4467,7 +4135,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "CPU some pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_some_pressure"
, NULL
, "cpu"
@@ -4490,7 +4158,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "CPU some pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_some_pressure_stall_time"
, NULL
, "cpu"
@@ -4517,7 +4185,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "CPU full pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_full_pressure"
, NULL
, "cpu"
@@ -4540,7 +4208,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "CPU full pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "cpu_full_pressure_stall_time"
, NULL
, "cpu"
@@ -4570,7 +4238,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "Memory some pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_some_pressure"
, NULL
, "mem"
@@ -4593,7 +4261,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "Memory some pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "memory_some_pressure_stall_time"
, NULL
, "mem"
@@ -4622,7 +4290,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "Memory full pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "mem_full_pressure"
, NULL
, "mem"
@@ -4646,7 +4314,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "Memory full pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "memory_full_pressure_stall_time"
, NULL
, "mem"
@@ -4676,7 +4344,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "irq_some_pressure"
, NULL
, "interrupts"
@@ -4699,7 +4367,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "irq_some_pressure_stall_time"
, NULL
, "interrupts"
@@ -4728,7 +4396,7 @@ void update_cgroup_charts(int update_every) {
snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "irq_full_pressure"
, NULL
, "interrupts"
@@ -4752,7 +4420,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "irq_full_pressure_stall_time"
, NULL
, "interrupts"
@@ -4782,7 +4450,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "I/O some pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "io_some_pressure"
, NULL
, "disk"
@@ -4805,7 +4473,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "I/O some pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "io_some_pressure_stall_time"
, NULL
, "disk"
@@ -4833,7 +4501,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "I/O full pressure");
chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "io_full_pressure"
, NULL
, "disk"
@@ -4856,7 +4524,7 @@ void update_cgroup_charts(int update_every) {
RRDSET *chart;
snprintfz(title, CHART_TITLE_MAX, "I/O full pressure stall time");
chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ cgroup_chart_type(type, cg)
, "io_full_pressure_stall_time"
, NULL
, "disk"
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
index dc800ba91..625be755d 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.h
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.h
@@ -39,6 +39,6 @@ typedef struct netdata_ebpf_cgroup_shm {
#include "../proc.plugin/plugin_proc.h"
-char *cgroup_parse_resolved_name_and_labels(DICTIONARY *labels, char *data);
+char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data);
#endif //NETDATA_SYS_FS_CGROUP_H
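The DICTIONARY-to-RRDLABELS switch also changes how callers iterate labels: as the test diff below shows, the per-label callback no longer receives an RRDLABEL_SRC argument. A sketch of the new callback shape (the surrounding walkthrough API is assumed, not shown in this diff):

    #include <stdio.h>

    static int print_label(const char *name, const char *value, void *data) {
        (void) data;
        printf("%s=%s\n", name, value);  // source flags are no longer passed through
        return 1;
    }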
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
index a0f915309..bb1fb3988 100644
--- a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
+++ b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
@@ -20,13 +20,12 @@ struct k8s_test_data {
int i;
};
-static int read_label_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data)
+static int read_label_callback(const char *name, const char *value, void *data)
{
struct k8s_test_data *test_data = (struct k8s_test_data *)data;
test_data->result_key[test_data->i] = name;
test_data->result_value[test_data->i] = value;
- test_data->result_ls[test_data->i] = ls;
test_data->i++;
@@ -37,7 +36,7 @@ static void test_cgroup_parse_resolved_name(void **state)
{
UNUSED(state);
- DICTIONARY *labels = rrdlabels_create();
+ RRDLABELS *labels = rrdlabels_create();
struct k8s_test_data test_data[] = {
// One label