author     Daniel Baumann <daniel.baumann@progress-linux.org>  2023-08-10 09:18:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2023-08-10 09:18:49 +0000
commit     dd814a7c1a8de056a79f7238578b09236edd5506 (patch)
tree       429e7eed5a634a4efe9a6877ce66da8e64aa1782 /collectors/cgroups.plugin
parent     Adding upstream version 1.41.0. (diff)
download   netdata-dd814a7c1a8de056a79f7238578b09236edd5506.tar.xz
           netdata-dd814a7c1a8de056a79f7238578b09236edd5506.zip

Adding upstream version 1.42.0. (tag: upstream/1.42.0)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/cgroups.plugin')
-rw-r--r--  collectors/cgroups.plugin/metadata.yaml (renamed from collectors/cgroups.plugin/multi_metadata.yaml) |  73
-rw-r--r--  collectors/cgroups.plugin/metrics.csv                                                                | 109
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.c                                                            | 165
3 files changed, 201 insertions(+), 146 deletions(-)
diff --git a/collectors/cgroups.plugin/multi_metadata.yaml b/collectors/cgroups.plugin/metadata.yaml
index b2b13c2d5..b342d30a3 100644
--- a/collectors/cgroups.plugin/multi_metadata.yaml
+++ b/collectors/cgroups.plugin/metadata.yaml
@@ -1,4 +1,4 @@
-name: cgroups.plugin
+plugin_name: cgroups.plugin
modules:
- &module
meta: &meta
@@ -9,7 +9,7 @@ modules:
link: ""
categories:
- data-collection.containers-and-vms
- icon_filename: netdata.png
+ icon_filename: container.svg
related_resources:
integrations:
list: []
@@ -18,14 +18,14 @@ modules:
keywords:
- containers
most_popular: true
- overview:
- data_collection:
- metrics_description: ""
+ overview: &overview
+ data_collection: &data_collection
+ metrics_description: "Monitor Containers for performance, resource usage, and health status."
method_description: ""
supported_platforms:
include: []
exclude: []
- multi-instance: true
+ multi_instance: true
additional_permissions:
description: ""
default_behavior:
@@ -398,22 +398,26 @@ modules:
chart_type: line
dimensions:
- name: mtu
-
- <<: *module
meta:
<<: *meta
monitored_instance:
name: Kubernetes Containers
link: https://kubernetes.io/
- icon_filename: k8s.png
+ icon_filename: kubernetes.svg
categories:
- - data-collection.containers-vms
+ - data-collection.containers-and-vms
- data-collection.kubernetes
keywords:
- k8s
- kubernetes
- pods
- containers
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: Monitor Kubernetes Clusters for performance, resource usage, and health status.
alerts:
- name: k8s_cgroup_10min_cpu_usage
link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
@@ -792,16 +796,23 @@ modules:
chart_type: line
dimensions:
- name: mtu
-
- <<: *module
meta:
<<: *meta
monitored_instance:
name: Systemd Services
link: ""
- icon_filename: systemd.png
+ icon_filename: systemd.svg
categories:
- data-collection.systemd
+ keywords:
+ - systemd
+ - services
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor Systemd Services for performance, resource usage, and health status."
alerts: []
metrics:
folding:
@@ -964,14 +975,18 @@ modules:
monitored_instance:
name: Virtual Machines
link: ""
- icon_filename: k8s.png
+ icon_filename: container.svg
categories:
- - data-collection.containers-vms
- - data-collection.kubernetes
+ - data-collection.containers-and-vms
keywords:
- vms
- virtualization
- container
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor Virtual Machines for performance, resource usage, and health status."
- <<: *module
meta:
<<: *meta
@@ -980,11 +995,16 @@ modules:
link: ""
icon_filename: lxc.png
categories:
- - data-collection.containers-vms
+ - data-collection.containers-and-vms
keywords:
- lxc
- lxd
- container
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor LXC Containers for performance, resource usage, and health status."
- <<: *module
meta:
<<: *meta
@@ -993,22 +1013,32 @@ modules:
link: ""
icon_filename: libvirt.png
categories:
- - data-collection.containers-vms
+ - data-collection.containers-and-vms
keywords:
- libvirt
- container
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor Libvirt for performance, resource usage, and health status."
- <<: *module
meta:
<<: *meta
monitored_instance:
name: oVirt Containers
link: ""
- icon_filename: ovirt.png
+ icon_filename: ovirt.svg
categories:
- - data-collection.containers-vms
+ - data-collection.containers-and-vms
keywords:
- ovirt
- container
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor oVirt for performance, resource usage, and health status."
- <<: *module
meta:
<<: *meta
@@ -1017,7 +1047,12 @@ modules:
link: ""
icon_filename: proxmox.png
categories:
- - data-collection.containers-vms
+ - data-collection.containers-and-vms
keywords:
- proxmox
- container
+ overview:
+ <<: *overview
+ data_collection:
+ <<: *data_collection
+ metrics_description: "Monitor Proxmox for performance, resource usage, and health status."
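
A note on the YAML mechanics above: the metadata changes lean on anchors and merge keys. The first module defines &module, &meta, &overview, and &data_collection once, and every later monitored instance pulls them in with <<: * and overrides only what differs. A reduced sketch of the pattern (generic values, not the full netdata schema):

    modules:
      - &module
        meta: &meta
          plugin_name: cgroups.plugin
          icon_filename: container.svg
        overview: &overview
          data_collection: &data_collection
            metrics_description: "Monitor Containers for performance, resource usage, and health status."
      - <<: *module                  # inherit the whole first module...
        meta:
          <<: *meta                  # ...and its shared meta block...
          monitored_instance:
            name: Virtual Machines   # ...overriding only the parts that differ
        overview:
          <<: *overview
          data_collection:
            <<: *data_collection
            metrics_description: "Monitor Virtual Machines for performance, resource usage, and health status."

Key spelling matters here: an override placed under data-collection (hyphen) is a different key from data_collection (underscore), so the inherited metrics_description would silently survive instead of being replaced.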
diff --git a/collectors/cgroups.plugin/metrics.csv b/collectors/cgroups.plugin/metrics.csv
deleted file mode 100644
index aae057baa..000000000
--- a/collectors/cgroups.plugin/metrics.csv
+++ /dev/null
@@ -1,109 +0,0 @@
-metric,scope,dimensions,unit,description,chart_type,labels,plugin,module
-cgroup.cpu_limit,cgroup,used,percentage,"CPU Usage within the limits",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu,cgroup,"user, system",percentage,"CPU Usage (100% = 1 core)",stacked,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_per_core,cgroup,a dimension per core,percentage,"CPU Usage (100% = 1 core) Per Core",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.throttled,cgroup,throttled,percentage,"CPU Throttled Runnable Periods",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.throttled_duration,cgroup,duration,ms,"CPU Throttled Time Duration",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_shares,cgroup,shares,shares,"CPU Time Relative Share",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem,cgroup,"cache, rss, swap, rss_huge, mapped_file",MiB,"Memory Usage",stacked,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.writeback,cgroup,"dirty, writeback",MiB,"Writeback Memory",area,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem_activity,cgroup,"in, out",MiB/s,"Memory Activity",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.pgfaults,cgroup,"pgfault, swap",MiB/s,"Memory Page Faults",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem_usage,cgroup,"ram, swap",MiB,"Used Memory",stacked,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem_usage_limit,cgroup,"available, used",MiB,"Used RAM within the limits",stacked,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem_utilization,cgroup,utilization,percentage,"Memory Utilization",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.mem_failcnt,cgroup,failures,count,"Memory Limit Failures",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.io,cgroup,"read, write",KiB/s,"I/O Bandwidth (all disks)",area,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.serviced_ops,cgroup,"read, write",operations/s,"Serviced I/O Operations (all disks)",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.throttle_io,cgroup,"read, write",KiB/s,"Throttle I/O Bandwidth (all disks)",area,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.throttle_serviced_ops,cgroup,"read, write",operations/s,"Throttle Serviced I/O Operations (all disks)",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.queued_ops,cgroup,"read, write",operations,"Queued I/O Operations (all disks)",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.merged_ops,cgroup,"read, write",operations/s,"Merged I/O Operations (all disks)",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_some_pressure,cgroup,"some10, some60, some300",percentage,"CPU some pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_some_pressure_stall_time,cgroup,time,ms,"CPU some pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_full_pressure,cgroup,"some10, some60, some300",percentage,"CPU full pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.cpu_full_pressure_stall_time,cgroup,time,ms,"CPU full pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.memory_some_pressure,cgroup,"some10, some60, some300",percentage,"Memory some pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.memory_some_pressure_stall_time,cgroup,time,ms,"Memory some pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.memory_full_pressure,cgroup,"some10, some60, some300",percentage,"Memory full pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.memory_full_pressure_stall_time,cgroup,time,ms,"Memory full pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.io_some_pressure,cgroup,"some10, some60, some300",percentage,"I/O some pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.io_some_pressure_stall_time,cgroup,time,ms,"I/O some pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.io_full_pressure,cgroup,"some10, some60, some300",percentage,"I/O some pressure",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.io_full_pressure_stall_time,cgroup,time,ms,"I/O some pressure stall time",line,"container_name, image",cgroups.plugin,/sys/fs/cgroup
-cgroup.net_net,"cgroup, network device","received, sent",kilobits/s,"Bandwidth",area,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_packets,"cgroup, network device","received, sent, multicast",pps,"Packets",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_errors,"cgroup, network device","inbound, outbound",errors/s,"Interface Errors",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_drops,"cgroup, network device","inbound, outbound",errors/s,"Interface Drops",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_fifo,"cgroup, network device","receive, transmit",errors/s,"Interface FIFO Buffer Errors",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_compressed,"cgroup, network device","receive, sent",pps,"Interface FIFO Buffer Errors",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_events,"cgroup, network device","frames, collisions, carrier",events/s,"Network Interface Events",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_operstate,"cgroup, network device","up, down, notpresent, lowerlayerdown, testing, dormant, unknown",state,"Interface Operational State",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_carrier,"cgroup, network device","up, down",state,"Interface Physical Link State",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-cgroup.net_mtu,"cgroup, network device",mtu,octets,"Interface MTU",line,"container_name, image, device, interface_type",cgroups.plugin,/proc/net/dev
-k8s.cgroup.cpu_limit,k8s cgroup,used,percentage,"CPU Usage within the limits",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu,k8s cgroup,"user, system",percentage,"CPU Usage (100% = 1000 mCPU)",stacked,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_per_core,k8s cgroup,a dimension per core,percentage,"CPU Usage (100% = 1000 mCPU) Per Core",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.throttled,k8s cgroup,throttled,percentage,"CPU Throttled Runnable Periods",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.throttled_duration,k8s cgroup,duration,ms,"CPU Throttled Time Duration",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_shares,k8s cgroup,shares,shares,"CPU Time Relative Share",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem,k8s cgroup,"cache, rss, swap, rss_huge, mapped_file",MiB,"Memory Usage",stacked,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.writeback,k8s cgroup,"dirty, writeback",MiB,"Writeback Memory",area,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem_activity,k8s cgroup,"in, out",MiB/s,"Memory Activity",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.pgfaults,k8s cgroup,"pgfault, swap",MiB/s,"Memory Page Faults",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem_usage,k8s cgroup,"ram, swap",MiB,"Used Memory",stacked,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem_usage_limit,k8s cgroup,"available, used",MiB,"Used RAM within the limits",stacked,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem_utilization,k8s cgroup,utilization,percentage,"Memory Utilization",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.mem_failcnt,k8s cgroup,failures,count,"Memory Limit Failures",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.io,k8s cgroup,"read, write",KiB/s,"I/O Bandwidth (all disks)",area,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.serviced_ops,k8s cgroup,"read, write",operations/s,"Serviced I/O Operations (all disks)",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.throttle_io,k8s cgroup,"read, write",KiB/s,"Throttle I/O Bandwidth (all disks)",area,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.throttle_serviced_ops,k8s cgroup,"read, write",operations/s,"Throttle Serviced I/O Operations (all disks)",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.queued_ops,k8s cgroup,"read, write",operations,"Queued I/O Operations (all disks)",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.merged_ops,k8s cgroup,"read, write",operations/s,"Merged I/O Operations (all disks)",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_some_pressure,k8s cgroup,"some10, some60, some300",percentage,"CPU some pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_some_pressure_stall_time,k8s cgroup,time,ms,"CPU some pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_full_pressure,k8s cgroup,"some10, some60, some300",percentage,"CPU full pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.cpu_full_pressure_stall_time,k8s cgroup,time,ms,"CPU full pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.memory_some_pressure,k8s cgroup,"some10, some60, some300",percentage,"Memory some pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.memory_some_pressure_stall_time,k8s cgroup,time,ms,"Memory some pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.memory_full_pressure,k8s cgroup,"some10, some60, some300",percentage,"Memory full pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.memory_full_pressure_stall_time,k8s cgroup,time,ms,"Memory full pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.io_some_pressure,k8s cgroup,"some10, some60, some300",percentage,"I/O some pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.io_some_pressure_stall_time,k8s cgroup,time,ms,"I/O some pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.io_full_pressure,k8s cgroup,"some10, some60, some300",percentage,"I/O some pressure",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.io_full_pressure_stall_time,k8s cgroup,time,ms,"I/O some pressure stall time",line,"k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/sys/fs/cgroup
-k8s.cgroup.net_net,"k8s cgroup, network device","received, sent",kilobits/s,"Bandwidth",area,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_packets,"k8s cgroup, network device","received, sent, multicast",pps,"Packets",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_errors,"k8s cgroup, network device","inbound, outbound",errors/s,"Interface Errors",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_drops,"k8s cgroup, network device","inbound, outbound",errors/s,"Interface Drops",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_fifo,"k8s cgroup, network device","receive, transmit",errors/s,"Interface FIFO Buffer Errors",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_compressed,"k8s cgroup, network device","receive, sent",pps,"Interface FIFO Buffer Errors",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_events,"k8s cgroup, network device","frames, collisions, carrier",events/s,"Network Interface Events",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_operstate,"k8s cgroup, network device","up, down, notpresent, lowerlayerdown, testing, dormant, unknown",state,"Interface Operational State",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_carrier,"k8s cgroup, network device","up, down",state,"Interface Physical Link State",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-k8s.cgroup.net_mtu,"k8s cgroup, network device",mtu,octets,"Interface MTU",line,"device, interface_type, k8s_namespace, k8s_pod_name, k8s_pod_uid, k8s_controller_kind, k8s_controller_name, k8s_node_name, k8s_container_name, k8s_container_id, k8s_kind, k8s_qos_class, k8s_cluster_id",cgroups.plugin,/proc/net/dev
-services.cpu,,a dimension per systemd service,percentage,"Systemd Services CPU utilization (100% = 1 core)",stacked,,cgroups.plugin,systemd
-services.mem_usage,,a dimension per systemd service,MiB,"Systemd Services Used Memory",stacked,,cgroups.plugin,systemd
-services.mem_rss,,a dimension per systemd service,MiB,"Systemd Services RSS Memory",stacked,,cgroups.plugin,systemd
-services.mem_mapped,,a dimension per systemd service,MiB,"Systemd Services Mapped Memory",stacked,,cgroups.plugin,systemd
-services.mem_cache,,a dimension per systemd service,MiB,"Systemd Services Cache Memory",stacked,,cgroups.plugin,systemd
-services.mem_writeback,,a dimension per systemd service,MiB,"Systemd Services Writeback Memory",stacked,,cgroups.plugin,systemd
-services.mem_pgfault,,a dimension per systemd service,MiB/s,"Systemd Services Memory Minor Page Faults",stacked,,cgroups.plugin,systemd
-services.mem_pgmajfault,,a dimension per systemd service,MiB/s,"Systemd Services Memory Major Page Faults",stacked,,cgroups.plugin,systemd
-services.mem_pgpgin,,a dimension per systemd service,MiB/s,"Systemd Services Memory Charging Activity",stacked,,cgroups.plugin,systemd
-services.mem_pgpgout,,a dimension per systemd service,MiB/s,"Systemd Services Memory Uncharging Activity",stacked,,cgroups.plugin,systemd
-services.mem_failcnt,,a dimension per systemd service,failures,"Systemd Services Memory Limit Failures",stacked,,cgroups.plugin,systemd
-services.swap_usage,,a dimension per systemd service,MiB,"Systemd Services Swap Memory Used",stacked,,cgroups.plugin,systemd
-services.io_read,,a dimension per systemd service,KiB/s,"Systemd Services Disk Read Bandwidth",stacked,,cgroups.plugin,systemd
-services.io_write,,a dimension per systemd service,KiB/s,"Systemd Services Disk Write Bandwidth",stacked,,cgroups.plugin,systemd
-services.io_ops_read,,a dimension per systemd service,operations/s,"Systemd Services Disk Read Operations",stacked,,cgroups.plugin,systemd
-services.io_ops_write,,a dimension per systemd service,operations/s,"Systemd Services Disk Write Operations",stacked,,cgroups.plugin,systemd
-services.throttle_io_read,,a dimension per systemd service,KiB/s,"Systemd Services Throttle Disk Read Bandwidth",stacked,,cgroups.plugin,systemd
-services.services.throttle_io_write,,a dimension per systemd service,KiB/s,"Systemd Services Throttle Disk Write Bandwidth",stacked,,cgroups.plugin,systemd
-services.throttle_io_ops_read,,a dimension per systemd service,operations/s,"Systemd Services Throttle Disk Read Operations",stacked,,cgroups.plugin,systemd
-throttle_io_ops_write,,a dimension per systemd service,operations/s,"Systemd Services Throttle Disk Write Operations",stacked,,cgroups.plugin,systemd
-services.queued_io_ops_read,,a dimension per systemd service,operations/s,"Systemd Services Queued Disk Read Operations",stacked,,cgroups.plugin,systemd
-services.queued_io_ops_write,,a dimension per systemd service,operations/s,"Systemd Services Queued Disk Write Operations",stacked,,cgroups.plugin,systemd
-services.merged_io_ops_read,,a dimension per systemd service,operations/s,"Systemd Services Merged Disk Read Operations",stacked,,cgroups.plugin,systemd
-services.merged_io_ops_write,,a dimension per systemd service,operations/s,"Systemd Services Merged Disk Write Operations",stacked,,cgroups.plugin,systemd
\ No newline at end of file
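
The deleted metrics.csv was the plugin's standalone metric catalog; as of 1.42.0 that information lives in the metrics section of metadata.yaml instead. Judging from the fragments visible in the diff above (folding, chart_type, dimensions), a single entry looks roughly like the sketch below; field names beyond those fragments follow the netdata integrations schema as an assumption, not something this diff confirms:

    metrics:
      folding:
        title: Metrics
        enabled: false
      scopes:
        - name: cgroup
          labels:
            - name: container_name
              description: ""
          metrics:
            - name: cgroup.cpu_limit
              description: CPU Usage within the limits
              unit: percentage
              chart_type: line
              dimensions:
                - name: used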
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
index fb805e63c..9c7488c82 100644
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -62,6 +62,8 @@ static int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
+static int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO;
+static int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO;
static int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
static int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
@@ -474,6 +476,7 @@ void read_cgroup_plugin_configuration() {
" !*.mount "
" !*.partition "
" !*.service "
+ " !*.service/udev "
" !*.socket "
" !*.slice "
" !*.swap "
@@ -828,6 +831,7 @@ struct cgroup {
struct pressure cpu_pressure;
struct pressure io_pressure;
struct pressure memory_pressure;
+ struct pressure irq_pressure;
// per cgroup charts
RRDSET *st_cpu;
@@ -1451,28 +1455,33 @@ static inline void cgroup2_read_pressure(struct pressure *res) {
return;
}
- res->some.share_time.value10 = strtod(procfile_lineword(ff, 0, 2), NULL);
- res->some.share_time.value60 = strtod(procfile_lineword(ff, 0, 4), NULL);
- res->some.share_time.value300 = strtod(procfile_lineword(ff, 0, 6), NULL);
- res->some.total_time.value_total = str2ull(procfile_lineword(ff, 0, 8), NULL) / 1000; // us->ms
+ bool did_some = false, did_full = false;
- if (lines > 2) {
- res->full.share_time.value10 = strtod(procfile_lineword(ff, 1, 2), NULL);
- res->full.share_time.value60 = strtod(procfile_lineword(ff, 1, 4), NULL);
- res->full.share_time.value300 = strtod(procfile_lineword(ff, 1, 6), NULL);
- res->full.total_time.value_total = str2ull(procfile_lineword(ff, 1, 8), NULL) / 1000; // us->ms
+ for(size_t l = 0; l < lines ;l++) {
+ const char *key = procfile_lineword(ff, l, 0);
+ if(strcmp(key, "some") == 0) {
+ res->some.share_time.value10 = strtod(procfile_lineword(ff, l, 2), NULL);
+ res->some.share_time.value60 = strtod(procfile_lineword(ff, l, 4), NULL);
+ res->some.share_time.value300 = strtod(procfile_lineword(ff, l, 6), NULL);
+ res->some.total_time.value_total = str2ull(procfile_lineword(ff, l, 8), NULL) / 1000; // us->ms
+ did_some = true;
+ }
+ else if(strcmp(key, "full") == 0) {
+ res->full.share_time.value10 = strtod(procfile_lineword(ff, l, 2), NULL);
+ res->full.share_time.value60 = strtod(procfile_lineword(ff, l, 4), NULL);
+ res->full.share_time.value300 = strtod(procfile_lineword(ff, l, 6), NULL);
+ res->full.total_time.value_total = str2ull(procfile_lineword(ff, l, 8), NULL) / 1000; // us->ms
+ did_full = true;
+ }
}
- res->updated = 1;
+ res->updated = (did_full || did_some) ? 1 : 0;
- if (unlikely(res->some.enabled == CONFIG_BOOLEAN_AUTO)) {
- res->some.enabled = CONFIG_BOOLEAN_YES;
- if (lines > 2) {
- res->full.enabled = CONFIG_BOOLEAN_YES;
- } else {
- res->full.enabled = CONFIG_BOOLEAN_NO;
- }
- }
+ if(unlikely(res->some.enabled == CONFIG_BOOLEAN_AUTO))
+ res->some.enabled = (did_some) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
+
+ if(unlikely(res->full.enabled == CONFIG_BOOLEAN_AUTO))
+ res->full.enabled = (did_full) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
}
}
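
The rewritten cgroup2_read_pressure() keys each record on its first word instead of assuming "some" always sits on line 0 and "full" on line 1 — necessary because some pressure files (notably irq.pressure) expose only a "full" record. A minimal self-contained sketch of the same keyword-based parsing, using plain stdio rather than netdata's procfile helpers:

    /* A cgroup v2 PSI file looks like:
     *   some avg10=0.12 avg60=0.08 avg300=0.01 total=123456
     *   full avg10=0.00 avg60=0.00 avg300=0.00 total=4567
     * Neither record is guaranteed to be present, so parse by keyword. */
    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    struct psi_record {
        double avg10, avg60, avg300;     /* running averages, percent */
        unsigned long long total_us;     /* cumulative stall time, microseconds */
        bool present;
    };

    static void parse_psi(FILE *fp, struct psi_record *some, struct psi_record *full) {
        char key[8];
        struct psi_record r;
        while (fscanf(fp, "%7s avg10=%lf avg60=%lf avg300=%lf total=%llu",
                      key, &r.avg10, &r.avg60, &r.avg300, &r.total_us) == 5) {
            r.present = true;
            if (!strcmp(key, "some")) *some = r;      /* record may appear on any line */
            else if (!strcmp(key, "full")) *full = r;
        }
    }

    int main(void) {
        struct psi_record some = {0}, full = {0};
        FILE *fp = fopen("/proc/pressure/cpu", "r"); /* system-wide PSI; cgroup files are analogous */
        if (!fp) return 1;
        parse_psi(fp, &some, &full);
        fclose(fp);
        if (some.present)
            printf("cpu some avg10=%.2f total=%llu ms\n", some.avg10, some.total_us / 1000);
        return 0;
    }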
@@ -1637,6 +1646,7 @@ static inline void read_cgroup(struct cgroup *cg) {
cgroup2_read_pressure(&cg->cpu_pressure);
cgroup2_read_pressure(&cg->io_pressure);
cgroup2_read_pressure(&cg->memory_pressure);
+ cgroup2_read_pressure(&cg->irq_pressure);
cgroup_read_memory(&cg->memory, 1);
}
}
@@ -1851,6 +1861,7 @@ static inline void cgroup_free(struct cgroup *cg) {
free_pressure(&cg->cpu_pressure);
free_pressure(&cg->io_pressure);
free_pressure(&cg->memory_pressure);
+ free_pressure(&cg->irq_pressure);
freez(cg->id);
freez(cg->intermediate_id);
@@ -2465,6 +2476,18 @@ static inline void discovery_update_filenames() {
netdata_log_debug(D_CGROUP, "memory.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
}
}
+
+ if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) {
+ snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id);
+ if (likely(stat(filename, &buf) != -1)) {
+ cg->irq_pressure.filename = strdupz(filename);
+ cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some;
+ cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full;
+ netdata_log_debug(D_CGROUP, "irq.pressure filename for cgroup '%s': '%s'", cg->id, cg->irq_pressure.filename);
+ } else {
+ netdata_log_debug(D_CGROUP, "irq.pressure file for cgroup '%s': '%s' does not exist", cg->id, filename);
+ }
+ }
}
}
}
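
The NO/AUTO defaults for the new switches (cgroup_enable_pressure_irq_some and cgroup_enable_pressure_irq_full, added at the top of the file) match how the kernel typically exposes this file: on kernels with IRQ time accounting enabled, irq.pressure carries only a "full" record — there is no "some" line for IRQ pressure — so "some" stays off and "full" is auto-enabled once the record is actually seen. For reference, the file usually reads:

    full avg10=0.00 avg60=0.00 avg300=0.00 total=0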
@@ -4643,6 +4666,112 @@ void update_cgroup_charts(int update_every) {
update_pressure_charts(pcs);
}
+ res = &cg->irq_pressure;
+
+ if (likely(res->updated && res->some.enabled)) {
+ struct pressure_charts *pcs;
+ pcs = &res->some;
+
+ if (unlikely(!pcs->share_time.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure");
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "irq_some_pressure"
+ , NULL
+ , "interrupts"
+ , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2310
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ if (unlikely(!pcs->total_time.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "IRQ some pressure stall time");
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "irq_some_pressure_stall_time"
+ , NULL
+ , "interrupts"
+ , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time"
+ , title
+ , "ms"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2330
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ update_pressure_charts(pcs);
+ }
+
+ if (likely(res->updated && res->full.enabled)) {
+ struct pressure_charts *pcs;
+ pcs = &res->full;
+
+ if (unlikely(!pcs->share_time.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure");
+
+ chart = pcs->share_time.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "irq_full_pressure"
+ , NULL
+ , "interrupts"
+ , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure"
+ , title
+ , "percentage"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2350
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdset_update_rrdlabels(chart = pcs->share_time.st, cg->chart_labels);
+ pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ if (unlikely(!pcs->total_time.st)) {
+ RRDSET *chart;
+ snprintfz(title, CHART_TITLE_MAX, "IRQ full pressure stall time");
+ chart = pcs->total_time.st = rrdset_create_localhost(
+ cgroup_chart_type(type, cg->chart_id, RRD_ID_LENGTH_MAX)
+ , "irq_full_pressure_stall_time"
+ , NULL
+ , "interrupts"
+ , k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time"
+ , title
+ , "ms"
+ , PLUGIN_CGROUPS_NAME
+ , PLUGIN_CGROUPS_MODULE_CGROUPS_NAME
+ , cgroup_containers_chart_priority + 2370
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+ rrdset_update_rrdlabels(chart = pcs->total_time.st, cg->chart_labels);
+ pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ update_pressure_charts(pcs);
+ }
+
res = &cg->io_pressure;
if (likely(res->updated && res->some.enabled)) {