path: root/collectors/proc.plugin
Diffstat (limited to 'collectors/proc.plugin')
-rw-r--r--  collectors/proc.plugin/integrations/amd_gpu.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/btrfs.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/conntrack.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/disk_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/entropy.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/infiniband.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/inter_process_communication.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/interrupts.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/ip_virtual_server.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/ipv6_socket_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/kernel_same-page_merging.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/md_raid.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/memory_modules_dimms.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/memory_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/memory_usage.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/network_interfaces.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/network_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/nfs_client.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/nfs_server.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/non-uniform_memory_access.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/page_types.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/power_supply.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/pressure_stall_information.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/sctp_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/socket_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/softirq_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/softnet_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/synproxy.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/system_load_average.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/system_statistics.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/system_uptime.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/wireless_network_interfaces.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/zfs_pools.md | 1
-rw-r--r--  collectors/proc.plugin/integrations/zram.md | 1
-rw-r--r--  collectors/proc.plugin/ipc.c | 12
-rw-r--r--  collectors/proc.plugin/plugin_proc.c | 76
-rw-r--r--  collectors/proc.plugin/plugin_proc.h | 3
-rw-r--r--  collectors/proc.plugin/proc_diskstats.c | 573
-rw-r--r--  collectors/proc.plugin/proc_interrupts.c | 6
-rw-r--r--  collectors/proc.plugin/proc_mdstat.c | 42
-rw-r--r--  collectors/proc.plugin/proc_net_dev.c | 509
-rw-r--r--  collectors/proc.plugin/proc_net_softnet_stat.c | 4
-rw-r--r--  collectors/proc.plugin/proc_net_wireless.c | 13
-rw-r--r--  collectors/proc.plugin/proc_pagetypeinfo.c | 8
-rw-r--r--  collectors/proc.plugin/proc_softirqs.c | 6
-rw-r--r--  collectors/proc.plugin/proc_spl_kstat_zfs.c | 7
-rw-r--r--  collectors/proc.plugin/proc_stat.c | 2
-rw-r--r--  collectors/proc.plugin/sys_block_zram.c | 2
-rw-r--r--  collectors/proc.plugin/sys_class_drm.c | 30
-rw-r--r--  collectors/proc.plugin/sys_class_power_supply.c | 4
-rw-r--r--  collectors/proc.plugin/sys_devices_pci_aer.c | 4
-rw-r--r--  collectors/proc.plugin/sys_fs_btrfs.c | 38
53 files changed, 1099 insertions(+), 275 deletions(-)
diff --git a/collectors/proc.plugin/integrations/amd_gpu.md b/collectors/proc.plugin/integrations/amd_gpu.md
index c9964dbb7..e85cce221 100644
--- a/collectors/proc.plugin/integrations/amd_gpu.md
+++ b/collectors/proc.plugin/integrations/amd_gpu.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "AMD GPU"
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/btrfs.md b/collectors/proc.plugin/integrations/btrfs.md
index 7c0764cf0..5f994c841 100644
--- a/collectors/proc.plugin/integrations/btrfs.md
+++ b/collectors/proc.plugin/integrations/btrfs.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "BTRFS"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Filesystem/BTRFS"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/conntrack.md b/collectors/proc.plugin/integrations/conntrack.md
index 543aafc16..b38f6b508 100644
--- a/collectors/proc.plugin/integrations/conntrack.md
+++ b/collectors/proc.plugin/integrations/conntrack.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Conntrack"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Firewall"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/disk_statistics.md b/collectors/proc.plugin/integrations/disk_statistics.md
index fc2ce5b08..8f7448c39 100644
--- a/collectors/proc.plugin/integrations/disk_statistics.md
+++ b/collectors/proc.plugin/integrations/disk_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Disk Statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Disk"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/entropy.md b/collectors/proc.plugin/integrations/entropy.md
index debf2e75e..8432a1f96 100644
--- a/collectors/proc.plugin/integrations/entropy.md
+++ b/collectors/proc.plugin/integrations/entropy.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Entropy"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/System"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/infiniband.md b/collectors/proc.plugin/integrations/infiniband.md
index 6ebefe73e..6cb5fdc53 100644
--- a/collectors/proc.plugin/integrations/infiniband.md
+++ b/collectors/proc.plugin/integrations/infiniband.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "InfiniBand"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/inter_process_communication.md b/collectors/proc.plugin/integrations/inter_process_communication.md
index b36b02d3b..55708a4b0 100644
--- a/collectors/proc.plugin/integrations/inter_process_communication.md
+++ b/collectors/proc.plugin/integrations/inter_process_communication.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Inter Process Communication"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/IPC"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/interrupts.md b/collectors/proc.plugin/integrations/interrupts.md
index 756324163..1b85fb767 100644
--- a/collectors/proc.plugin/integrations/interrupts.md
+++ b/collectors/proc.plugin/integrations/interrupts.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Interrupts"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/CPU"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/ip_virtual_server.md b/collectors/proc.plugin/integrations/ip_virtual_server.md
index 22f43544e..5c7afd2eb 100644
--- a/collectors/proc.plugin/integrations/ip_virtual_server.md
+++ b/collectors/proc.plugin/integrations/ip_virtual_server.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "IP Virtual Server"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
index bf0fbaa00..2c1ee2721 100644
--- a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
+++ b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "IPv6 Socket Statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/kernel_same-page_merging.md b/collectors/proc.plugin/integrations/kernel_same-page_merging.md
index bed7891bd..336f0feaf 100644
--- a/collectors/proc.plugin/integrations/kernel_same-page_merging.md
+++ b/collectors/proc.plugin/integrations/kernel_same-page_merging.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Kernel Same-Page Merging"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/md_raid.md b/collectors/proc.plugin/integrations/md_raid.md
index ef78b8269..34a4840bb 100644
--- a/collectors/proc.plugin/integrations/md_raid.md
+++ b/collectors/proc.plugin/integrations/md_raid.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "MD RAID"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Disk"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/memory_modules_dimms.md b/collectors/proc.plugin/integrations/memory_modules_dimms.md
index dc59fe5fc..351c6fcd7 100644
--- a/collectors/proc.plugin/integrations/memory_modules_dimms.md
+++ b/collectors/proc.plugin/integrations/memory_modules_dimms.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Memory modules (DIMMs)"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/memory_statistics.md b/collectors/proc.plugin/integrations/memory_statistics.md
index 712b4b5e8..52f1bf530 100644
--- a/collectors/proc.plugin/integrations/memory_statistics.md
+++ b/collectors/proc.plugin/integrations/memory_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Memory Statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/memory_usage.md b/collectors/proc.plugin/integrations/memory_usage.md
index 0eef72b12..141bd29ad 100644
--- a/collectors/proc.plugin/integrations/memory_usage.md
+++ b/collectors/proc.plugin/integrations/memory_usage.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Memory Usage"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/network_interfaces.md b/collectors/proc.plugin/integrations/network_interfaces.md
index 0d26b5b66..0cfd56fae 100644
--- a/collectors/proc.plugin/integrations/network_interfaces.md
+++ b/collectors/proc.plugin/integrations/network_interfaces.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Network interfaces"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/network_statistics.md b/collectors/proc.plugin/integrations/network_statistics.md
index f43da8339..726fd9d61 100644
--- a/collectors/proc.plugin/integrations/network_statistics.md
+++ b/collectors/proc.plugin/integrations/network_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Network statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/nfs_client.md b/collectors/proc.plugin/integrations/nfs_client.md
index 696e0c0d6..db5847714 100644
--- a/collectors/proc.plugin/integrations/nfs_client.md
+++ b/collectors/proc.plugin/integrations/nfs_client.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "NFS Client"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/nfs_server.md b/collectors/proc.plugin/integrations/nfs_server.md
index ddbf03f90..0c906b4d8 100644
--- a/collectors/proc.plugin/integrations/nfs_server.md
+++ b/collectors/proc.plugin/integrations/nfs_server.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "NFS Server"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/non-uniform_memory_access.md b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
index 58b96a3e7..6f495fb79 100644
--- a/collectors/proc.plugin/integrations/non-uniform_memory_access.md
+++ b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Non-Uniform Memory Access"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/page_types.md b/collectors/proc.plugin/integrations/page_types.md
index 7f84182de..b228629b6 100644
--- a/collectors/proc.plugin/integrations/page_types.md
+++ b/collectors/proc.plugin/integrations/page_types.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Page types"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/power_supply.md b/collectors/proc.plugin/integrations/power_supply.md
index 4980f845b..9a474e82a 100644
--- a/collectors/proc.plugin/integrations/power_supply.md
+++ b/collectors/proc.plugin/integrations/power_supply.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Power Supply"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Power Supply"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/pressure_stall_information.md b/collectors/proc.plugin/integrations/pressure_stall_information.md
index e590a8d38..53f4aa050 100644
--- a/collectors/proc.plugin/integrations/pressure_stall_information.md
+++ b/collectors/proc.plugin/integrations/pressure_stall_information.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Pressure Stall Information"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Pressure"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/sctp_statistics.md b/collectors/proc.plugin/integrations/sctp_statistics.md
index ad9c26bf5..15c0d424d 100644
--- a/collectors/proc.plugin/integrations/sctp_statistics.md
+++ b/collectors/proc.plugin/integrations/sctp_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "SCTP Statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/socket_statistics.md b/collectors/proc.plugin/integrations/socket_statistics.md
index 2c59f9883..d8ef26647 100644
--- a/collectors/proc.plugin/integrations/socket_statistics.md
+++ b/collectors/proc.plugin/integrations/socket_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Socket statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/softirq_statistics.md b/collectors/proc.plugin/integrations/softirq_statistics.md
index 56cf9ab5c..f966cf971 100644
--- a/collectors/proc.plugin/integrations/softirq_statistics.md
+++ b/collectors/proc.plugin/integrations/softirq_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "SoftIRQ statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/CPU"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/softnet_statistics.md b/collectors/proc.plugin/integrations/softnet_statistics.md
index 84ac5ac88..58e6cf6e5 100644
--- a/collectors/proc.plugin/integrations/softnet_statistics.md
+++ b/collectors/proc.plugin/integrations/softnet_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Softnet Statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/synproxy.md b/collectors/proc.plugin/integrations/synproxy.md
index 04169773b..2db17ef6f 100644
--- a/collectors/proc.plugin/integrations/synproxy.md
+++ b/collectors/proc.plugin/integrations/synproxy.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Synproxy"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Firewall"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/system_load_average.md b/collectors/proc.plugin/integrations/system_load_average.md
index caff72737..6e986d90c 100644
--- a/collectors/proc.plugin/integrations/system_load_average.md
+++ b/collectors/proc.plugin/integrations/system_load_average.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "System Load Average"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/System"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/system_statistics.md b/collectors/proc.plugin/integrations/system_statistics.md
index 2932dd8d2..f3df1a19a 100644
--- a/collectors/proc.plugin/integrations/system_statistics.md
+++ b/collectors/proc.plugin/integrations/system_statistics.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "System statistics"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/System"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/system_uptime.md b/collectors/proc.plugin/integrations/system_uptime.md
index 7eedd4313..0954c0642 100644
--- a/collectors/proc.plugin/integrations/system_uptime.md
+++ b/collectors/proc.plugin/integrations/system_uptime.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "System Uptime"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/System"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/wireless_network_interfaces.md b/collectors/proc.plugin/integrations/wireless_network_interfaces.md
index 57375b975..a8d2406ee 100644
--- a/collectors/proc.plugin/integrations/wireless_network_interfaces.md
+++ b/collectors/proc.plugin/integrations/wireless_network_interfaces.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "Wireless network interfaces"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Network"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
index d62d12ee6..c200ba673 100644
--- a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
+++ b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "ZFS Adaptive Replacement Cache"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/zfs_pools.md b/collectors/proc.plugin/integrations/zfs_pools.md
index b913572e3..2985d39b0 100644
--- a/collectors/proc.plugin/integrations/zfs_pools.md
+++ b/collectors/proc.plugin/integrations/zfs_pools.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "ZFS Pools"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/integrations/zram.md b/collectors/proc.plugin/integrations/zram.md
index 0bcda3eaf..111b17c62 100644
--- a/collectors/proc.plugin/integrations/zram.md
+++ b/collectors/proc.plugin/integrations/zram.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugi
sidebar_label: "ZRAM"
learn_status: "Published"
learn_rel_path: "Data Collection/Linux Systems/Memory"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c
index b166deba6..204977bdf 100644
--- a/collectors/proc.plugin/ipc.c
+++ b/collectors/proc.plugin/ipc.c
@@ -451,8 +451,8 @@ int do_ipc(int update_every, usec_t dt) {
msq->found = 0;
}
else {
- rrddim_is_obsolete(st_msq_messages, msq->rd_messages);
- rrddim_is_obsolete(st_msq_bytes, msq->rd_bytes);
+ rrddim_is_obsolete___safe_from_collector_thread(st_msq_messages, msq->rd_messages);
+ rrddim_is_obsolete___safe_from_collector_thread(st_msq_bytes, msq->rd_bytes);
// remove message queue from the linked list
if(!msq_prev)
@@ -480,19 +480,19 @@ int do_ipc(int update_every, usec_t dt) {
if(unlikely(dimensions_num > dimensions_limit)) {
collector_info("Message queue statistics has been disabled");
collector_info("There are %lld dimensions in memory but limit was set to %lld", dimensions_num, dimensions_limit);
- rrdset_is_obsolete(st_msq_messages);
- rrdset_is_obsolete(st_msq_bytes);
+ rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages);
+ rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes);
st_msq_messages = NULL;
st_msq_bytes = NULL;
do_msg = CONFIG_BOOLEAN_NO;
}
else if(unlikely(!message_queue_root)) {
collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_messages), rrdset_id(st_msq_messages));
- rrdset_is_obsolete(st_msq_messages);
+ rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages);
st_msq_messages = NULL;
collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_bytes), rrdset_id(st_msq_bytes));
- rrdset_is_obsolete(st_msq_bytes);
+ rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes);
st_msq_bytes = NULL;
}
}
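
Note on the ipc.c hunks above: they swap the plain obsolete markers for their ___safe_from_collector_thread variants, which may be called from the collecting thread itself. A minimal sketch of the pattern, assuming netdata's internal RRD headers are available; the struct and function names below are illustrative, only the two marker calls come from the diff:

    // Sketch only: retire the dimensions of an object that disappeared,
    // using the collector-thread-safe markers introduced by this change.
    #include "plugin_proc.h"            // assumed to pull in the RRDSET/RRDDIM API

    struct my_queue {                   // illustrative stand-in for ipc.c's queue entry
        RRDDIM *rd_messages;
        RRDDIM *rd_bytes;
    };

    static void retire_queue_dimensions(RRDSET *st_messages, RRDSET *st_bytes, struct my_queue *q) {
        rrddim_is_obsolete___safe_from_collector_thread(st_messages, q->rd_messages);
        rrddim_is_obsolete___safe_from_collector_thread(st_bytes,    q->rd_bytes);
    }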
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
index fbcaa614a..3f11aaf6c 100644
--- a/collectors/proc.plugin/plugin_proc.c
+++ b/collectors/proc.plugin/plugin_proc.c
@@ -138,10 +138,18 @@ static bool is_lxcfs_proc_mounted() {
return false;
}
+static bool log_proc_module(BUFFER *wb, void *data) {
+ struct proc_module *pm = data;
+ buffer_sprintf(wb, "proc.plugin[%s]", pm->name);
+ return true;
+}
+
void *proc_main(void *ptr)
{
worker_register("PROC");
+ rrd_collector_started();
+
if (config_get_boolean("plugin:proc", "/proc/net/dev", CONFIG_BOOLEAN_YES)) {
netdev_thread = mallocz(sizeof(netdata_thread_t));
netdata_log_debug(D_SYSTEM, "Starting thread %s.", THREAD_NETDEV_NAME);
@@ -151,46 +159,56 @@ void *proc_main(void *ptr)
netdata_thread_cleanup_push(proc_main_cleanup, ptr);
- config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO);
+ {
+ config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO);
- // check the enabled status for each module
- int i;
- for (i = 0; proc_modules[i].name; i++) {
- struct proc_module *pm = &proc_modules[i];
+ // check the enabled status for each module
+ int i;
+ for(i = 0; proc_modules[i].name; i++) {
+ struct proc_module *pm = &proc_modules[i];
- pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES);
- pm->rd = NULL;
+ pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES);
+ pm->rd = NULL;
- worker_register_job_name(i, proc_modules[i].dim);
- }
+ worker_register_job_name(i, proc_modules[i].dim);
+ }
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
+ usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
- inside_lxc_container = is_lxcfs_proc_mounted();
+ inside_lxc_container = is_lxcfs_proc_mounted();
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+#define LGS_MODULE_ID 0
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
+ ND_LOG_STACK lgs[] = {
+ [LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin"),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
- for (i = 0; proc_modules[i].name; i++) {
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
+ while(service_running(SERVICE_COLLECTORS)) {
+ worker_is_idle();
+ usec_t hb_dt = heartbeat_next(&hb, step);
- struct proc_module *pm = &proc_modules[i];
- if (unlikely(!pm->enabled))
- continue;
+ if(unlikely(!service_running(SERVICE_COLLECTORS)))
+ break;
- netdata_log_debug(D_PROCNETDEV_LOOP, "PROC calling %s.", pm->name);
+ for(i = 0; proc_modules[i].name; i++) {
+ if(unlikely(!service_running(SERVICE_COLLECTORS)))
+ break;
- worker_is_busy(i);
- pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
- }
- }
+ struct proc_module *pm = &proc_modules[i];
+ if(unlikely(!pm->enabled))
+ continue;
+
+ worker_is_busy(i);
+ lgs[LGS_MODULE_ID] = ND_LOG_FIELD_CB(NDF_MODULE, log_proc_module, pm);
+ pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
+ lgs[LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin");
+ }
+ }
+ }
netdata_thread_cleanup_pop(1);
return NULL;
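
The plugin_proc.c change above registers the thread as a collector (rrd_collector_started()) and pushes an ND_LOG_STACK so log lines are attributed to the module currently running: a callback names the module while its function executes, and a static "proc.plugin" value is restored afterwards. A minimal sketch of that pattern, assuming the ND_LOG_* macros from netdata's logging headers; the module name and the commented-out call are illustrative:

    // Sketch of the per-module log attribution pattern used in proc_main().
    static bool log_my_module(BUFFER *wb, void *data) {
        buffer_sprintf(wb, "proc.plugin[%s]", (const char *)data);  // value NDF_MODULE resolves to while set
        return true;
    }

    static void run_modules_with_logging(void) {
        ND_LOG_STACK lgs[] = {
            [0] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin"),      // default attribution
            ND_LOG_FIELD_END(),
        };
        ND_LOG_STACK_PUSH(lgs);

        // while a module runs, slot 0 points at a callback naming it,
        // and is restored to the static default afterwards
        lgs[0] = ND_LOG_FIELD_CB(NDF_MODULE, log_my_module, (void *)"diskstats");
        // ... call the module's collection function here ...
        lgs[0] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin");
    }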
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
index a0ddd76c4..e4fc105ba 100644
--- a/collectors/proc.plugin/plugin_proc.h
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -59,7 +59,8 @@ void netdev_rename_device_add(
const char *container_device,
const char *container_name,
RRDLABELS *labels,
- const char *ctx_prefix);
+ const char *ctx_prefix,
+ const DICTIONARY_ITEM *cgroup_netdev_link);
void netdev_rename_device_del(const char *host_device);
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
index e65c42212..475d90835 100644
--- a/collectors/proc.plugin/proc_diskstats.c
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -6,6 +6,8 @@
#define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats"
#define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME
+#define RRDFUNCTIONS_DISKSTATS_HELP "View block device statistics"
+
#define DISK_TYPE_UNKNOWN 0
#define DISK_TYPE_PHYSICAL 1
#define DISK_TYPE_PARTITION 2
@@ -14,6 +16,8 @@
#define DEFAULT_PREFERRED_IDS "*"
#define DEFAULT_EXCLUDED_DISKS "loop* ram*"
+static netdata_mutex_t diskstats_dev_mutex = NETDATA_MUTEX_INITIALIZER;
+
static struct disk {
char *disk; // the name of the disk (sda, sdb, etc, after being looked up)
char *device; // the device of the disk (before being looked up)
@@ -28,6 +32,9 @@ static struct disk {
int sector_size;
int type;
+ bool excluded;
+ bool function_ready;
+
char *mount_point;
char *chart_id;
@@ -168,7 +175,7 @@ static struct disk {
struct disk *next;
} *disk_root = NULL;
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
// static char *path_to_get_hw_sector_size = NULL;
// static char *path_to_get_hw_sector_size_partitions = NULL;
@@ -359,7 +366,10 @@ static inline int get_disk_name_from_path(const char *path, char *result, size_t
DIR *dir = opendir(path);
if (!dir) {
- collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path);
+ if (errno == ENOENT)
+ nd_log_collector(NDLP_DEBUG, "DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s': no such file or directory.", disk, major, minor, path);
+ else
+ collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path);
goto failed;
}
@@ -490,7 +500,7 @@ static inline bool ends_with(const char *str, const char *suffix) {
static inline char *get_disk_by_id(char *device) {
char pathname[256 + 1];
- snprintfz(pathname, 256, "%s/by-id", path_to_dev_disk);
+ snprintfz(pathname, sizeof(pathname) - 1, "%s/by-id", path_to_dev_disk);
struct dirent *entry;
DIR *dp = opendir(pathname);
@@ -536,21 +546,25 @@ static inline char *get_disk_model(char *device) {
char path[256 + 1];
char buffer[256 + 1];
- snprintfz(path, 256, "%s/%s/device/model", path_to_sys_block, device);
+ snprintfz(path, sizeof(path) - 1, "%s/%s/device/model", path_to_sys_block, device);
if(read_file(path, buffer, 256) != 0) {
- snprintfz(path, 256, "%s/%s/device/name", path_to_sys_block, device);
+ snprintfz(path, sizeof(path) - 1, "%s/%s/device/name", path_to_sys_block, device);
if(read_file(path, buffer, 256) != 0)
return NULL;
}
- return strdupz(buffer);
+ char *clean = trim(buffer);
+ if (!clean)
+ return NULL;
+
+ return strdupz(clean);
}
static inline char *get_disk_serial(char *device) {
char path[256 + 1];
char buffer[256 + 1];
- snprintfz(path, 256, "%s/%s/device/serial", path_to_sys_block, device);
+ snprintfz(path, sizeof(path) - 1, "%s/%s/device/serial", path_to_sys_block, device);
if(read_file(path, buffer, 256) != 0)
return NULL;
@@ -582,13 +596,17 @@ static inline char *get_disk_serial(char *device) {
static void get_disk_config(struct disk *d) {
int def_enable = global_enable_new_disks_detected_at_runtime;
- if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk)))
+ if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk))) {
+ d->excluded = true;
def_enable = CONFIG_BOOLEAN_NO;
+ }
char var_name[4096 + 1];
snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk);
- def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
+ if (config_exists(var_name, "enable"))
+ def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
+
if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
// the user does not want any metrics for this disk
d->do_io = CONFIG_BOOLEAN_NO;
@@ -640,7 +658,8 @@ static void get_disk_config(struct disk *d) {
// def_performance
// check the user configuration (this will also show our 'on demand' decision)
- def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
+ if (config_exists(var_name, "enable performance metrics"))
+ def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
int ddo_io = CONFIG_BOOLEAN_NO,
ddo_ops = CONFIG_BOOLEAN_NO,
@@ -663,21 +682,44 @@ static void get_disk_config(struct disk *d) {
ddo_ext = global_do_ext,
ddo_backlog = global_do_backlog,
ddo_bcache = global_do_bcache;
+ } else {
+ d->excluded = true;
}
- d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
- d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
- d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
- d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
- d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
- d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
- d->do_ext = config_get_boolean_ondemand(var_name, "extended operations", ddo_ext);
- d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
-
- if(d->device_is_bcache)
- d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache);
- else
+ d->do_io = ddo_io;
+ d->do_ops = ddo_ops;
+ d->do_mops = ddo_mops;
+ d->do_iotime = ddo_iotime;
+ d->do_qops = ddo_qops;
+ d->do_util = ddo_util;
+ d->do_ext = ddo_ext;
+ d->do_backlog = ddo_backlog;
+
+ if (config_exists(var_name, "bandwidth"))
+ d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
+ if (config_exists(var_name, "operations"))
+ d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
+ if (config_exists(var_name, "merged operations"))
+ d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
+ if (config_exists(var_name, "i/o time"))
+ d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
+ if (config_exists(var_name, "queued operations"))
+ d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
+ if (config_exists(var_name, "utilization percentage"))
+ d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
+ if (config_exists(var_name, "extended operations"))
+ d->do_ext = config_get_boolean_ondemand(var_name, "extended operations", ddo_ext);
+ if (config_exists(var_name, "backlog"))
+ d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
+
+ d->do_bcache = ddo_bcache;
+
+ if (d->device_is_bcache) {
+ if (config_exists(var_name, "bcache"))
+ d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache);
+ } else {
d->do_bcache = 0;
+ }
}
}
@@ -702,6 +744,8 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
// create a new disk structure
d = (struct disk *)callocz(1, sizeof(struct disk));
+ d->excluded = false;
+ d->function_ready = false;
d->disk = get_disk_name(major, minor, disk);
d->device = strdupz(disk);
d->disk_by_id = get_disk_by_id(disk);
@@ -963,35 +1007,399 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
}
get_disk_config(d);
+
return d;
}
+static const char *get_disk_type_string(int disk_type) {
+ switch (disk_type) {
+ case DISK_TYPE_PHYSICAL:
+ return "physical";
+ case DISK_TYPE_PARTITION:
+ return "partition";
+ case DISK_TYPE_VIRTUAL:
+ return "virtual";
+ default:
+ return "unknown";
+ }
+}
+
static void add_labels_to_disk(struct disk *d, RRDSET *st) {
rrdlabels_add(st->rrdlabels, "device", d->disk, RRDLABEL_SRC_AUTO);
rrdlabels_add(st->rrdlabels, "mount_point", d->mount_point, RRDLABEL_SRC_AUTO);
rrdlabels_add(st->rrdlabels, "id", d->disk_by_id, RRDLABEL_SRC_AUTO);
rrdlabels_add(st->rrdlabels, "model", d->model, RRDLABEL_SRC_AUTO);
rrdlabels_add(st->rrdlabels, "serial", d->serial, RRDLABEL_SRC_AUTO);
-// rrdlabels_add(st->rrdlabels, "rotational", d->rotational ? "true" : "false", RRDLABEL_SRC_AUTO);
-// rrdlabels_add(st->rrdlabels, "removable", d->removable ? "true" : "false", RRDLABEL_SRC_AUTO);
+ rrdlabels_add(st->rrdlabels, "device_type", get_disk_type_string(d->type), RRDLABEL_SRC_AUTO);
+}
- switch (d->type) {
- default:
- case DISK_TYPE_UNKNOWN:
- rrdlabels_add(st->rrdlabels, "device_type", "unknown", RRDLABEL_SRC_AUTO);
- break;
+static int diskstats_function_block_devices(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
+ void *collector_data __maybe_unused,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
+ void *register_canceller_cb_data __maybe_unused) {
+
+ buffer_flush(wb);
+ wb->content_type = CT_APPLICATION_JSON;
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
+
+ buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
+ buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
+ buffer_json_member_add_string(wb, "type", "table");
+ buffer_json_member_add_time_t(wb, "update_every", 1);
+ buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_DISKSTATS_HELP);
+ buffer_json_member_add_array(wb, "data");
+
+ double max_io_reads = 0.0;
+ double max_io_writes = 0.0;
+ double max_io = 0.0;
+ double max_backlog_time = 0.0;
+ double max_busy_time = 0.0;
+ double max_busy_perc = 0.0;
+ double max_iops_reads = 0.0;
+ double max_iops_writes = 0.0;
+ double max_iops_time_reads = 0.0;
+ double max_iops_time_writes = 0.0;
+ double max_iops_avg_time_read = 0.0;
+ double max_iops_avg_time_write = 0.0;
+ double max_iops_avg_size_read = 0.0;
+ double max_iops_avg_size_write = 0.0;
+
+ netdata_mutex_lock(&diskstats_dev_mutex);
+
+ for (struct disk *d = disk_root; d; d = d->next) {
+ if (unlikely(!d->function_ready))
+ continue;
- case DISK_TYPE_PHYSICAL:
- rrdlabels_add(st->rrdlabels, "device_type", "physical", RRDLABEL_SRC_AUTO);
- break;
+ buffer_json_add_array_item_array(wb);
+
+ buffer_json_add_array_item_string(wb, d->device);
+ buffer_json_add_array_item_string(wb, get_disk_type_string(d->type));
+ buffer_json_add_array_item_string(wb, d->disk_by_id);
+ buffer_json_add_array_item_string(wb, d->model);
+ buffer_json_add_array_item_string(wb, d->serial);
+
+ // IO
+ double io_reads = rrddim_get_last_stored_value(d->rd_io_reads, &max_io_reads, 1024.0);
+ double io_writes = rrddim_get_last_stored_value(d->rd_io_writes, &max_io_writes, 1024.0);
+ double io_total = NAN;
+ if (!isnan(io_reads) && !isnan(io_writes)) {
+ io_total = io_reads + io_writes;
+ max_io = MAX(max_io, io_total);
+ }
+ // Backlog and Busy Time
+ double busy_perc = rrddim_get_last_stored_value(d->rd_util_utilization, &max_busy_perc, 1);
+ double busy_time = rrddim_get_last_stored_value(d->rd_busy_busy, &max_busy_time, 1);
+ double backlog_time = rrddim_get_last_stored_value(d->rd_backlog_backlog, &max_backlog_time, 1);
+ // IOPS
+ double iops_reads = rrddim_get_last_stored_value(d->rd_ops_reads, &max_iops_reads, 1);
+ double iops_writes = rrddim_get_last_stored_value(d->rd_ops_writes, &max_iops_writes, 1);
+ // IO Time
+ double iops_time_reads = rrddim_get_last_stored_value(d->rd_iotime_reads, &max_iops_time_reads, 1);
+ double iops_time_writes = rrddim_get_last_stored_value(d->rd_iotime_writes, &max_iops_time_writes, 1);
+ // Avg IO Time
+ double iops_avg_time_read = rrddim_get_last_stored_value(d->rd_await_reads, &max_iops_avg_time_read, 1);
+ double iops_avg_time_write = rrddim_get_last_stored_value(d->rd_await_writes, &max_iops_avg_time_write, 1);
+ // Avg IO Size
+ double iops_avg_size_read = rrddim_get_last_stored_value(d->rd_avgsz_reads, &max_iops_avg_size_read, 1);
+ double iops_avg_size_write = rrddim_get_last_stored_value(d->rd_avgsz_writes, &max_iops_avg_size_write, 1);
+
+
+ buffer_json_add_array_item_double(wb, io_reads);
+ buffer_json_add_array_item_double(wb, io_writes);
+ buffer_json_add_array_item_double(wb, io_total);
+ buffer_json_add_array_item_double(wb, busy_perc);
+ buffer_json_add_array_item_double(wb, busy_time);
+ buffer_json_add_array_item_double(wb, backlog_time);
+ buffer_json_add_array_item_double(wb, iops_reads);
+ buffer_json_add_array_item_double(wb, iops_writes);
+ buffer_json_add_array_item_double(wb, iops_time_reads);
+ buffer_json_add_array_item_double(wb, iops_time_writes);
+ buffer_json_add_array_item_double(wb, iops_avg_time_read);
+ buffer_json_add_array_item_double(wb, iops_avg_time_write);
+ buffer_json_add_array_item_double(wb, iops_avg_size_read);
+ buffer_json_add_array_item_double(wb, iops_avg_size_write);
+
+ // End
+ buffer_json_array_close(wb);
+ }
- case DISK_TYPE_PARTITION:
- rrdlabels_add(st->rrdlabels, "device_type", "partition", RRDLABEL_SRC_AUTO);
- break;
+ netdata_mutex_unlock(&diskstats_dev_mutex);
- case DISK_TYPE_VIRTUAL:
- rrdlabels_add(st->rrdlabels, "device_type", "virtual", RRDLABEL_SRC_AUTO);
- break;
+ buffer_json_array_close(wb); // data
+ buffer_json_member_add_object(wb, "columns");
+ {
+ size_t field_id = 0;
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Device", "Device Name",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Type", "Device Type",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "ID", "Device ID",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Model", "Device Model",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Serial", "Device Serial Number",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Read", "Data Read from Device",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_io_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Written", "Data Writen to Device",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_io_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Total", "Data Transferred to and from Device",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "MiB", max_io, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_NONE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Busy%", "Disk Busy Percentage",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "%", max_busy_perc, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Busy", "Disk Busy Time",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_busy_time, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Backlog", "Disk Backlog",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_backlog_time, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Completed Read Operations",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "ops", max_iops_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Completed Write Operations",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "ops", max_iops_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "ReadsTime", "Read Operations Time",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_iops_time_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "WritesTime", "Write Operations Time",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_iops_time_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgTime", "Average Read Operation Service Time",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_iops_avg_time_read, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgTime", "Average Write Operation Service Time",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "milliseconds", max_iops_avg_time_write, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgSz", "Average Read Operation Size",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "KiB", max_iops_avg_size_read, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgSz", "Average Write Operation Size",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "KiB", max_iops_avg_size_write, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+ }
+
+ buffer_json_object_close(wb); // columns
+ buffer_json_member_add_string(wb, "default_sort_column", "Total");
+
+ buffer_json_member_add_object(wb, "charts");
+ {
+ buffer_json_member_add_object(wb, "IO");
+ {
+ buffer_json_member_add_string(wb, "name", "IO");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Read");
+ buffer_json_add_array_item_string(wb, "Written");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "Busy");
+ {
+ buffer_json_member_add_string(wb, "name", "Busy");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Busy");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // charts
+
+ buffer_json_member_add_array(wb, "default_charts");
+ {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "IO");
+ buffer_json_add_array_item_string(wb, "Device");
+ buffer_json_array_close(wb);
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "Busy");
+ buffer_json_add_array_item_string(wb, "Device");
+ buffer_json_array_close(wb);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_object(wb, "group_by");
+ {
+ buffer_json_member_add_object(wb, "Type");
+ {
+ buffer_json_member_add_string(wb, "name", "Type");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Type");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // group_by
+
+ buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
+ buffer_json_finalize(wb);
+
+ int response = HTTP_RESP_OK;
+ if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
+ buffer_flush(wb);
+ response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
+ }
+
+ if(result_cb)
+ result_cb(wb, response, result_cb_data);
+
+ return response;
+}
+
+static void diskstats_cleanup_disks() {
+ struct disk *d = disk_root, *last = NULL;
+ while (d) {
+ if (unlikely(global_cleanup_removed_disks && !d->updated)) {
+ struct disk *t = d;
+
+ rrdset_obsolete_and_pointer_null(d->st_avgsz);
+ rrdset_obsolete_and_pointer_null(d->st_ext_avgsz);
+ rrdset_obsolete_and_pointer_null(d->st_await);
+ rrdset_obsolete_and_pointer_null(d->st_ext_await);
+ rrdset_obsolete_and_pointer_null(d->st_backlog);
+ rrdset_obsolete_and_pointer_null(d->st_busy);
+ rrdset_obsolete_and_pointer_null(d->st_io);
+ rrdset_obsolete_and_pointer_null(d->st_ext_io);
+ rrdset_obsolete_and_pointer_null(d->st_iotime);
+ rrdset_obsolete_and_pointer_null(d->st_ext_iotime);
+ rrdset_obsolete_and_pointer_null(d->st_mops);
+ rrdset_obsolete_and_pointer_null(d->st_ext_mops);
+ rrdset_obsolete_and_pointer_null(d->st_ops);
+ rrdset_obsolete_and_pointer_null(d->st_ext_ops);
+ rrdset_obsolete_and_pointer_null(d->st_qops);
+ rrdset_obsolete_and_pointer_null(d->st_svctm);
+ rrdset_obsolete_and_pointer_null(d->st_util);
+ rrdset_obsolete_and_pointer_null(d->st_bcache);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_size);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_usage);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_cache_allocations);
+ rrdset_obsolete_and_pointer_null(d->st_bcache_cache_read_races);
+
+ if (d == disk_root) {
+ disk_root = d = d->next;
+ last = NULL;
+ } else if (last) {
+ last->next = d = d->next;
+ }
+
+ freez(t->bcache_filename_dirty_data);
+ freez(t->bcache_filename_writeback_rate);
+ freez(t->bcache_filename_cache_congested);
+ freez(t->bcache_filename_cache_available_percent);
+ freez(t->bcache_filename_stats_five_minute_cache_hit_ratio);
+ freez(t->bcache_filename_stats_hour_cache_hit_ratio);
+ freez(t->bcache_filename_stats_day_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hit_ratio);
+ freez(t->bcache_filename_stats_total_cache_hits);
+ freez(t->bcache_filename_stats_total_cache_misses);
+ freez(t->bcache_filename_stats_total_cache_miss_collisions);
+ freez(t->bcache_filename_stats_total_cache_bypass_hits);
+ freez(t->bcache_filename_stats_total_cache_bypass_misses);
+ freez(t->bcache_filename_stats_total_cache_readaheads);
+ freez(t->bcache_filename_cache_read_races);
+ freez(t->bcache_filename_cache_io_errors);
+ freez(t->bcache_filename_priority_stats);
+
+ freez(t->disk);
+ freez(t->device);
+ freez(t->disk_by_id);
+ freez(t->model);
+ freez(t->serial);
+ freez(t->mount_point);
+ freez(t->chart_id);
+ freez(t);
+ } else {
+ d->updated = 0;
+ last = d;
+ d = d->next;
+ }
}
}
@@ -1080,12 +1488,20 @@ int do_proc_diskstats(int update_every, usec_t dt) {
ff = procfile_readall(ff);
if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
+ static bool add_func = true;
+ if (add_func) {
+ rrd_function_add(localhost, NULL, "block-devices", 10, RRDFUNCTIONS_DISKSTATS_HELP, true, diskstats_function_block_devices, NULL);
+ add_func = false;
+ }
+
size_t lines = procfile_lines(ff), l;
collected_number system_read_kb = 0, system_write_kb = 0;
int do_dc_stats = 0, do_fl_stats = 0;
+ netdata_mutex_lock(&diskstats_dev_mutex);
+
for(l = 0; l < lines ;l++) {
// --------------------------------------------------------------------------
// Read parameters
@@ -1210,7 +1626,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// --------------------------------------------------------------------------
// Do performance metrics
-
if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO &&
(readsectors || writesectors || discardsectors ||
netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
@@ -2056,8 +2471,13 @@ int do_proc_diskstats(int update_every, usec_t dt) {
rrdset_done(d->st_bcache_bypass);
}
}
+
+ d->function_ready = !d->excluded;
}
+ diskstats_cleanup_disks();
+
+ netdata_mutex_unlock(&diskstats_dev_mutex);
// update the system total I/O
if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO &&
@@ -2091,80 +2511,5 @@ int do_proc_diskstats(int update_every, usec_t dt) {
rrdset_done(st_io);
}
- // cleanup removed disks
-
- struct disk *d = disk_root, *last = NULL;
- while(d) {
- if(unlikely(global_cleanup_removed_disks && !d->updated)) {
- struct disk *t = d;
-
- rrdset_obsolete_and_pointer_null(d->st_avgsz);
- rrdset_obsolete_and_pointer_null(d->st_ext_avgsz);
- rrdset_obsolete_and_pointer_null(d->st_await);
- rrdset_obsolete_and_pointer_null(d->st_ext_await);
- rrdset_obsolete_and_pointer_null(d->st_backlog);
- rrdset_obsolete_and_pointer_null(d->st_busy);
- rrdset_obsolete_and_pointer_null(d->st_io);
- rrdset_obsolete_and_pointer_null(d->st_ext_io);
- rrdset_obsolete_and_pointer_null(d->st_iotime);
- rrdset_obsolete_and_pointer_null(d->st_ext_iotime);
- rrdset_obsolete_and_pointer_null(d->st_mops);
- rrdset_obsolete_and_pointer_null(d->st_ext_mops);
- rrdset_obsolete_and_pointer_null(d->st_ops);
- rrdset_obsolete_and_pointer_null(d->st_ext_ops);
- rrdset_obsolete_and_pointer_null(d->st_qops);
- rrdset_obsolete_and_pointer_null(d->st_svctm);
- rrdset_obsolete_and_pointer_null(d->st_util);
- rrdset_obsolete_and_pointer_null(d->st_bcache);
- rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
- rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
- rrdset_obsolete_and_pointer_null(d->st_bcache_size);
- rrdset_obsolete_and_pointer_null(d->st_bcache_usage);
- rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio);
- rrdset_obsolete_and_pointer_null(d->st_bcache_cache_allocations);
- rrdset_obsolete_and_pointer_null(d->st_bcache_cache_read_races);
-
- if(d == disk_root) {
- disk_root = d = d->next;
- last = NULL;
- }
- else if(last) {
- last->next = d = d->next;
- }
-
- freez(t->bcache_filename_dirty_data);
- freez(t->bcache_filename_writeback_rate);
- freez(t->bcache_filename_cache_congested);
- freez(t->bcache_filename_cache_available_percent);
- freez(t->bcache_filename_stats_five_minute_cache_hit_ratio);
- freez(t->bcache_filename_stats_hour_cache_hit_ratio);
- freez(t->bcache_filename_stats_day_cache_hit_ratio);
- freez(t->bcache_filename_stats_total_cache_hit_ratio);
- freez(t->bcache_filename_stats_total_cache_hits);
- freez(t->bcache_filename_stats_total_cache_misses);
- freez(t->bcache_filename_stats_total_cache_miss_collisions);
- freez(t->bcache_filename_stats_total_cache_bypass_hits);
- freez(t->bcache_filename_stats_total_cache_bypass_misses);
- freez(t->bcache_filename_stats_total_cache_readaheads);
- freez(t->bcache_filename_cache_read_races);
- freez(t->bcache_filename_cache_io_errors);
- freez(t->bcache_filename_priority_stats);
-
- freez(t->disk);
- freez(t->device);
- freez(t->disk_by_id);
- freez(t->model);
- freez(t->serial);
- freez(t->mount_point);
- freez(t->chart_id);
- freez(t);
- }
- else {
- d->updated = 0;
- last = d;
- d = d->next;
- }
- }
-
return 0;
}
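
The proc_diskstats.c hunks above move the removed-disk cleanup out of do_proc_diskstats() into the new diskstats_cleanup_disks() helper, register a "block-devices" table function exactly once, and wrap the collection pass in diskstats_dev_mutex so the function callback can walk the disk list safely. Below is a minimal, self-contained sketch of that register-once / lock-around-the-pass pattern; it uses plain pthreads and hypothetical names (struct dev, collector_pass, cleanup_removed) rather than Netdata's internals.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical, simplified device list - not Netdata's struct disk */
struct dev {
    int updated;
    struct dev *next;
};

static struct dev *dev_root = NULL;
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* unlink and free every device that was not updated on this pass,
   following the shape of diskstats_cleanup_disks() in the hunks above */
static void cleanup_removed(void) {
    struct dev *d = dev_root, *last = NULL;
    while (d) {
        if (!d->updated) {
            struct dev *t = d;
            if (d == dev_root) {
                dev_root = d = d->next;
                last = NULL;
            }
            else {
                last->next = d = d->next;   /* last is always set when d != dev_root */
            }
            free(t);
        }
        else {
            d->updated = 0;    /* reset the flag for the next pass */
            last = d;
            d = d->next;
        }
    }
}

static void collector_pass(void) {
    static bool registered = false;
    if (!registered) {
        /* one-shot registration, as rrd_function_add() is called once in the diff */
        puts("registering the block-devices function");
        registered = true;
    }

    pthread_mutex_lock(&dev_mutex);    /* the function callback takes the same lock */
    /* ... read /proc/diskstats here and set d->updated = 1 for devices seen ... */
    cleanup_removed();
    pthread_mutex_unlock(&dev_mutex);
}

int main(void) {
    collector_pass();
    return 0;
}

Holding the same mutex in both the collector and the function callback is what lets the callback read last-stored values from a list that the collector may otherwise be freeing concurrently.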
diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c
index 9a20700a3..37071b22f 100644
--- a/collectors/proc.plugin/proc_interrupts.c
+++ b/collectors/proc.plugin/proc_interrupts.c
@@ -201,10 +201,10 @@ int do_proc_interrupts(int update_every, usec_t dt) {
for(c = 0; c < cpus ;c++) {
if(unlikely(!core_st[c])) {
char id[50+1];
- snprintfz(id, 50, "cpu%d_interrupts", c);
+ snprintfz(id, sizeof(id) - 1, "cpu%d_interrupts", c);
char title[100+1];
- snprintfz(title, 100, "CPU Interrupts");
+ snprintfz(title, sizeof(title) - 1, "CPU Interrupts");
core_st[c] = rrdset_create_localhost(
"cpu"
, id
@@ -221,7 +221,7 @@ int do_proc_interrupts(int update_every, usec_t dt) {
);
char core[50+1];
- snprintfz(core, 50, "cpu%d", c);
+ snprintfz(core, sizeof(core) - 1, "cpu%d", c);
rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO);
}
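
Throughout proc_interrupts.c (and the files that follow) hard-coded snprintfz() bounds are replaced with sizeof(buf) - 1, so the bound can no longer drift from the buffer declaration; the [50+1] buffer / 50 bound pairing in the diff suggests snprintfz() takes a maximum character count with the terminating NUL extra. A short illustration of the same idea with standard snprintf(), whose size argument includes the NUL and therefore takes sizeof(buf) directly:

#include <stdio.h>

static void format_cpu_id(char *out, size_t outsz, int core) {
    /* the caller passes sizeof(buffer), so the bound always matches the declaration */
    snprintf(out, outsz, "cpu%d_interrupts", core);
}

int main(void) {
    char id[50 + 1];
    format_cpu_id(id, sizeof(id), 7);
    puts(id);    /* prints "cpu7_interrupts" */
    return 0;
}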
diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c
index c3d1793cb..3857d9ec4 100644
--- a/collectors/proc.plugin/proc_mdstat.c
+++ b/collectors/proc.plugin/proc_mdstat.c
@@ -70,10 +70,10 @@ static inline void make_chart_obsolete(char *name, const char *id_modifier)
RRDSET *st = NULL;
if (likely(name && id_modifier)) {
- snprintfz(id, 50, "mdstat.%s_%s", name, id_modifier);
+ snprintfz(id, sizeof(id) - 1, "mdstat.%s_%s", name, id_modifier);
st = rrdset_find_active_byname_localhost(id);
if (likely(st))
- rrdset_is_obsolete(st);
+ rrdset_is_obsolete___safe_from_collector_thread(st);
}
}
@@ -409,7 +409,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(st_mdstat_health);
+ rrdset_isnot_obsolete___safe_from_collector_thread(st_mdstat_health);
}
if (!redundant_num) {
@@ -438,10 +438,10 @@ int do_proc_mdstat(int update_every, usec_t dt)
if (likely(raid->redundant)) {
if (likely(do_disks)) {
- snprintfz(id, 50, "%s_disks", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_disks", raid->name);
if (unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_disks = rrdset_create_localhost(
"mdstat",
@@ -457,7 +457,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_STACKED);
- rrdset_isnot_obsolete(raid->st_disks);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_disks);
add_labels_to_mdstat(raid, raid->st_disks);
}
@@ -473,10 +473,10 @@ int do_proc_mdstat(int update_every, usec_t dt)
}
if (likely(do_mismatch)) {
- snprintfz(id, 50, "%s_mismatch", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_mismatch", raid->name);
if (unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_mismatch_cnt = rrdset_create_localhost(
"mdstat",
@@ -492,7 +492,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(raid->st_mismatch_cnt);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_mismatch_cnt);
add_labels_to_mdstat(raid, raid->st_mismatch_cnt);
}
@@ -505,10 +505,10 @@ int do_proc_mdstat(int update_every, usec_t dt)
}
if (likely(do_operations)) {
- snprintfz(id, 50, "%s_operation", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_operation", raid->name);
if (unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_operation = rrdset_create_localhost(
"mdstat",
@@ -524,7 +524,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(raid->st_operation);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_operation);
add_labels_to_mdstat(raid, raid->st_operation);
}
@@ -544,9 +544,9 @@ int do_proc_mdstat(int update_every, usec_t dt)
rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
rrdset_done(raid->st_operation);
- snprintfz(id, 50, "%s_finish", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_finish", raid->name);
if (unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_finish = rrdset_create_localhost(
"mdstat",
@@ -561,7 +561,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10,
update_every, RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(raid->st_finish);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_finish);
add_labels_to_mdstat(raid, raid->st_finish);
}
@@ -572,9 +572,9 @@ int do_proc_mdstat(int update_every, usec_t dt)
rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
rrdset_done(raid->st_finish);
- snprintfz(id, 50, "%s_speed", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_speed", raid->name);
if (unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_speed = rrdset_create_localhost(
"mdstat",
@@ -590,7 +590,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(raid->st_speed);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_speed);
add_labels_to_mdstat(raid, raid->st_speed);
}
@@ -603,10 +603,10 @@ int do_proc_mdstat(int update_every, usec_t dt)
}
} else {
if (likely(do_nonredundant)) {
- snprintfz(id, 50, "%s_availability", raid->name);
+ snprintfz(id, sizeof(id) - 1, "%s_availability", raid->name);
if (unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_active_localhost(id)))) {
- snprintfz(family, 50, "%s (%s)", raid->name, raid->level);
+ snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
raid->st_nonredundant = rrdset_create_localhost(
"mdstat",
@@ -622,7 +622,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_isnot_obsolete(raid->st_nonredundant);
+ rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_nonredundant);
add_labels_to_mdstat(raid, raid->st_nonredundant);
}
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
index 8539c7725..b39f39683 100644
--- a/collectors/proc.plugin/proc_net_dev.c
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -5,16 +5,35 @@
#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev"
#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME
+#define RRDFUNCTIONS_NETDEV_HELP "View network interface statistics"
+
#define STATE_LENGTH_MAX 32
#define READ_RETRY_PERIOD 60 // seconds
+void cgroup_netdev_reset_all(void);
+void cgroup_netdev_release(const DICTIONARY_ITEM *link);
+const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link);
+void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent);
+
enum {
NETDEV_DUPLEX_UNKNOWN,
NETDEV_DUPLEX_HALF,
NETDEV_DUPLEX_FULL
};
+static const char *get_duplex_string(int duplex)
+{
+ switch (duplex) {
+ case NETDEV_DUPLEX_FULL:
+ return "full";
+ case NETDEV_DUPLEX_HALF:
+ return "half";
+ default:
+ return "unknown";
+ }
+}
+
enum {
NETDEV_OPERSTATE_UNKNOWN,
NETDEV_OPERSTATE_NOTPRESENT,
@@ -44,6 +63,26 @@ static inline int get_operstate(char *operstate)
return NETDEV_OPERSTATE_UNKNOWN;
}
+static const char *get_operstate_string(int operstate)
+{
+ switch (operstate) {
+ case NETDEV_OPERSTATE_UP:
+ return "up";
+ case NETDEV_OPERSTATE_DOWN:
+ return "down";
+ case NETDEV_OPERSTATE_NOTPRESENT:
+ return "notpresent";
+ case NETDEV_OPERSTATE_LOWERLAYERDOWN:
+ return "lowerlayerdown";
+ case NETDEV_OPERSTATE_TESTING:
+ return "testing";
+ case NETDEV_OPERSTATE_DORMANT:
+ return "dormant";
+ default:
+ return "unknown";
+ }
+}
+
// ----------------------------------------------------------------------------
// netdev list
@@ -58,6 +97,8 @@ static struct netdev {
int enabled;
int updated;
+ bool function_ready;
+
time_t discover_time;
int carrier_file_exists;
@@ -208,6 +249,8 @@ static struct netdev {
char *filename_carrier;
char *filename_mtu;
+ const DICTIONARY_ITEM *cgroup_netdev_link;
+
struct netdev *next;
} *netdev_root = NULL, *netdev_last_used = NULL;
@@ -216,18 +259,18 @@ static size_t netdev_added = 0, netdev_found = 0;
// ----------------------------------------------------------------------------
static void netdev_charts_release(struct netdev *d) {
- if(d->st_bandwidth) rrdset_is_obsolete(d->st_bandwidth);
- if(d->st_packets) rrdset_is_obsolete(d->st_packets);
- if(d->st_errors) rrdset_is_obsolete(d->st_errors);
- if(d->st_drops) rrdset_is_obsolete(d->st_drops);
- if(d->st_fifo) rrdset_is_obsolete(d->st_fifo);
- if(d->st_compressed) rrdset_is_obsolete(d->st_compressed);
- if(d->st_events) rrdset_is_obsolete(d->st_events);
- if(d->st_speed) rrdset_is_obsolete(d->st_speed);
- if(d->st_duplex) rrdset_is_obsolete(d->st_duplex);
- if(d->st_operstate) rrdset_is_obsolete(d->st_operstate);
- if(d->st_carrier) rrdset_is_obsolete(d->st_carrier);
- if(d->st_mtu) rrdset_is_obsolete(d->st_mtu);
+ if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth);
+ if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets);
+ if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors);
+ if(d->st_drops) rrdset_is_obsolete___safe_from_collector_thread(d->st_drops);
+ if(d->st_fifo) rrdset_is_obsolete___safe_from_collector_thread(d->st_fifo);
+ if(d->st_compressed) rrdset_is_obsolete___safe_from_collector_thread(d->st_compressed);
+ if(d->st_events) rrdset_is_obsolete___safe_from_collector_thread(d->st_events);
+ if(d->st_speed) rrdset_is_obsolete___safe_from_collector_thread(d->st_speed);
+ if(d->st_duplex) rrdset_is_obsolete___safe_from_collector_thread(d->st_duplex);
+ if(d->st_operstate) rrdset_is_obsolete___safe_from_collector_thread(d->st_operstate);
+ if(d->st_carrier) rrdset_is_obsolete___safe_from_collector_thread(d->st_carrier);
+ if(d->st_mtu) rrdset_is_obsolete___safe_from_collector_thread(d->st_mtu);
d->st_bandwidth = NULL;
d->st_compressed = NULL;
@@ -326,6 +369,7 @@ static void netdev_free(struct netdev *d) {
netdev_charts_release(d);
netdev_free_chart_strings(d);
rrdlabels_destroy(d->chart_labels);
+ cgroup_netdev_release(d->cgroup_netdev_link);
freez((void *)d->name);
freez((void *)d->filename_speed);
@@ -352,11 +396,14 @@ static struct netdev_rename {
int processed;
+ const DICTIONARY_ITEM *cgroup_netdev_link;
+
struct netdev_rename *next;
} *netdev_rename_root = NULL;
static int netdev_pending_renames = 0;
static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER;
+static netdata_mutex_t netdev_dev_mutex = NETDATA_MUTEX_INITIALIZER;
static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) {
struct netdev_rename *r;
@@ -374,7 +421,8 @@ void netdev_rename_device_add(
const char *container_device,
const char *container_name,
RRDLABELS *labels,
- const char *ctx_prefix)
+ const char *ctx_prefix,
+ const DICTIONARY_ITEM *cgroup_netdev_link)
{
netdata_mutex_lock(&netdev_rename_mutex);
@@ -391,6 +439,8 @@ void netdev_rename_device_add(
r->hash = hash;
r->next = netdev_rename_root;
r->processed = 0;
+ r->cgroup_netdev_link = cgroup_netdev_link;
+
netdev_rename_root = r;
netdev_pending_renames++;
collector_info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
@@ -406,6 +456,8 @@ void netdev_rename_device_add(
rrdlabels_migrate_to_these(r->chart_labels, labels);
r->processed = 0;
+ r->cgroup_netdev_link = cgroup_netdev_link;
+
netdev_pending_renames++;
collector_info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
}
@@ -438,6 +490,7 @@ void netdev_rename_device_del(const char *host_device) {
freez((void *) r->container_device);
freez((void *) r->ctx_prefix);
rrdlabels_destroy(r->chart_labels);
+ cgroup_netdev_release(r->cgroup_netdev_link);
freez((void *) r);
break;
}
@@ -451,6 +504,7 @@ static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *
netdev_charts_release(d);
netdev_free_chart_strings(d);
+ d->cgroup_netdev_link = cgroup_netdev_dup(r->cgroup_netdev_link);
char buffer[RRD_ID_LENGTH_MAX + 1];
@@ -521,6 +575,7 @@ static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *
d->chart_family = strdupz("net");
rrdlabels_copy(d->chart_labels, r->chart_labels);
+ rrdlabels_add(d->chart_labels, "container_device", r->container_device, RRDLABEL_SRC_AUTO);
d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE;
d->flipped = 1;
@@ -554,6 +609,319 @@ static inline void netdev_rename_all_lock(void) {
}
// ----------------------------------------------------------------------------
+
+int netdev_function_net_interfaces(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
+ void *collector_data __maybe_unused,
+ rrd_function_result_callback_t result_cb, void *result_cb_data,
+ rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
+ rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
+ void *register_canceller_cb_data __maybe_unused) {
+
+ buffer_flush(wb);
+ wb->content_type = CT_APPLICATION_JSON;
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
+
+ buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
+ buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
+ buffer_json_member_add_string(wb, "type", "table");
+ buffer_json_member_add_time_t(wb, "update_every", 1);
+ buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_NETDEV_HELP);
+ buffer_json_member_add_array(wb, "data");
+
+ double max_traffic_rx = 0.0;
+ double max_traffic_tx = 0.0;
+ double max_traffic = 0.0;
+ double max_packets_rx = 0.0;
+ double max_packets_tx = 0.0;
+ double max_mcast_rx = 0.0;
+ double max_drops_rx = 0.0;
+ double max_drops_tx = 0.0;
+
+ netdata_mutex_lock(&netdev_dev_mutex);
+
+ RRDDIM *rd = NULL;
+
+ for (struct netdev *d = netdev_root; d != netdev_last_used; d = d->next) {
+ if (unlikely(!d->function_ready))
+ continue;
+
+ buffer_json_add_array_item_array(wb);
+
+ buffer_json_add_array_item_string(wb, d->name);
+
+ buffer_json_add_array_item_string(wb, d->virtual ? "virtual" : "physical");
+ buffer_json_add_array_item_string(wb, d->flipped ? "cgroup" : "host");
+ buffer_json_add_array_item_string(wb, d->carrier == 1 ? "up" : "down");
+ buffer_json_add_array_item_string(wb, get_operstate_string(d->operstate));
+ buffer_json_add_array_item_string(wb, get_duplex_string(d->duplex));
+ buffer_json_add_array_item_double(wb, d->speed > 0 ? d->speed : NAN);
+ buffer_json_add_array_item_double(wb, d->mtu > 0 ? d->mtu : NAN);
+
+ rd = d->flipped ? d->rd_tbytes : d->rd_rbytes;
+ double traffic_rx = rrddim_get_last_stored_value(rd, &max_traffic_rx, 1000.0);
+ rd = d->flipped ? d->rd_rbytes : d->rd_tbytes;
+ double traffic_tx = rrddim_get_last_stored_value(rd, &max_traffic_tx, 1000.0);
+
+ rd = d->flipped ? d->rd_tpackets : d->rd_rpackets;
+ double packets_rx = rrddim_get_last_stored_value(rd, &max_packets_rx, 1000.0);
+ rd = d->flipped ? d->rd_rpackets : d->rd_tpackets;
+ double packets_tx = rrddim_get_last_stored_value(rd, &max_packets_tx, 1000.0);
+
+ double mcast_rx = rrddim_get_last_stored_value(d->rd_rmulticast, &max_mcast_rx, 1000.0);
+
+ rd = d->flipped ? d->rd_tdrops : d->rd_rdrops;
+ double drops_rx = rrddim_get_last_stored_value(rd, &max_drops_rx, 1.0);
+ rd = d->flipped ? d->rd_rdrops : d->rd_tdrops;
+ double drops_tx = rrddim_get_last_stored_value(rd, &max_drops_tx, 1.0);
+
+ // FIXME: "traffic" (total) is needed only for default_sorting
+ // can be removed when default_sorting accepts multiple columns (sum)
+ double traffic = NAN;
+ if (!isnan(traffic_rx) && !isnan(traffic_tx)) {
+ traffic = traffic_rx + traffic_tx;
+ max_traffic = MAX(max_traffic, traffic);
+ }
+
+
+ buffer_json_add_array_item_double(wb, traffic_rx);
+ buffer_json_add_array_item_double(wb, traffic_tx);
+ buffer_json_add_array_item_double(wb, traffic);
+ buffer_json_add_array_item_double(wb, packets_rx);
+ buffer_json_add_array_item_double(wb, packets_tx);
+ buffer_json_add_array_item_double(wb, mcast_rx);
+ buffer_json_add_array_item_double(wb, drops_rx);
+ buffer_json_add_array_item_double(wb, drops_tx);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "severity", drops_rx + drops_tx > 0 ? "warning" : "normal");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_array_close(wb);
+ }
+
+ netdata_mutex_unlock(&netdev_dev_mutex);
+
+ buffer_json_array_close(wb); // data
+ buffer_json_member_add_object(wb, "columns");
+ {
+ size_t field_id = 0;
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Interface", "Network Interface Name",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Type", "Network Interface Type",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "UsedBy", "Indicates whether the network interface is used by a cgroup or by the host system",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "PhState", "Current Physical State",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "OpState", "Current Operational State",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Duplex", "Current Duplex Mode",
+ RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
+ 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Speed", "Current Link Speed",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
+ 0, "Mbit", NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "MTU", "Maximum Transmission Unit",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
+ 0, "Octets", NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "In", "Traffic Received",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Mbit", max_traffic_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Out", "Traffic Sent",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Mbit", max_traffic_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "Total", "Traffic Received and Sent",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Mbit", max_traffic, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_NONE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "PktsIn", "Received Packets",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Kpps", max_packets_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "PktsOut", "Sent Packets",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Kpps", max_packets_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "McastIn", "Multicast Received Packets",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Kpps", max_mcast_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_NONE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "DropsIn", "Dropped Inbound Packets",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Drops", max_drops_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(wb, field_id++, "DropsOut", "Dropped Outbound Packets",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
+ 2, "Drops", max_drops_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
+ RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_VISIBLE,
+ NULL);
+
+ buffer_rrdf_table_add_field(
+ wb, field_id++,
+ "rowOptions", "rowOptions",
+ RRDF_FIELD_TYPE_NONE,
+ RRDR_FIELD_VISUAL_ROW_OPTIONS,
+ RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
+ RRDF_FIELD_SORT_FIXED,
+ NULL,
+ RRDF_FIELD_SUMMARY_COUNT,
+ RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_OPTS_DUMMY,
+ NULL);
+ }
+
+ buffer_json_object_close(wb); // columns
+ buffer_json_member_add_string(wb, "default_sort_column", "Total");
+
+ buffer_json_member_add_object(wb, "charts");
+ {
+ buffer_json_member_add_object(wb, "Traffic");
+ {
+ buffer_json_member_add_string(wb, "name", "Traffic");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "In");
+ buffer_json_add_array_item_string(wb, "Out");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "Packets");
+ {
+ buffer_json_member_add_string(wb, "name", "Packets");
+ buffer_json_member_add_string(wb, "type", "stacked-bar");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "PktsIn");
+ buffer_json_add_array_item_string(wb, "PktsOut");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // charts
+
+ buffer_json_member_add_array(wb, "default_charts");
+ {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "Traffic");
+ buffer_json_add_array_item_string(wb, "Interface");
+ buffer_json_array_close(wb);
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, "Traffic");
+ buffer_json_add_array_item_string(wb, "Type");
+ buffer_json_array_close(wb);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_object(wb, "group_by");
+ {
+ buffer_json_member_add_object(wb, "Type");
+ {
+ buffer_json_member_add_string(wb, "name", "Type");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "Type");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "UsedBy");
+ {
+ buffer_json_member_add_string(wb, "name", "UsedBy");
+ buffer_json_member_add_array(wb, "columns");
+ {
+ buffer_json_add_array_item_string(wb, "UsedBy");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb); // group_by
+
+ buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
+ buffer_json_finalize(wb);
+
+ int response = HTTP_RESP_OK;
+ if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
+ buffer_flush(wb);
+ response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
+ }
+
+ if(result_cb)
+ result_cb(wb, response, result_cb_data);
+
+ return response;
+}
+
// netdev data collection
static void netdev_cleanup() {
@@ -615,6 +983,7 @@ static struct netdev *get_netdev(const char *name) {
d->hash = simple_hash(d->name);
d->len = strlen(d->name);
d->chart_labels = rrdlabels_create();
+ d->function_ready = false;
d->chart_type_net_bytes = strdupz("net");
d->chart_type_net_compressed = strdupz("net_compressed");
@@ -779,56 +1148,88 @@ int do_proc_net_dev(int update_every, usec_t dt) {
if(d->enabled)
d->enabled = !simple_pattern_matches(disabled_list, d->name);
- char buffer[FILENAME_MAX + 1];
+ char buf[FILENAME_MAX + 1];
+ snprintfz(buf, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name);
- snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name);
- if (likely(access(buffer, R_OK) == 0)) {
- d->virtual = 1;
- rrdlabels_add(d->chart_labels, "interface_type", "virtual", RRDLABEL_SRC_AUTO);
- }
- else {
+ d->virtual = likely(access(buf, R_OK) == 0) ? 1 : 0;
+
+ // At least on Proxmox inside LXC: eth0 is virtual.
+ // Virtual interfaces are not taken into account in system.net calculations
+ if (inside_lxc_container && d->virtual && strncmp(d->name, "eth", 3) == 0)
d->virtual = 0;
+
+ if (d->virtual)
+ rrdlabels_add(d->chart_labels, "interface_type", "virtual", RRDLABEL_SRC_AUTO);
+ else
rrdlabels_add(d->chart_labels, "interface_type", "real", RRDLABEL_SRC_AUTO);
- }
+
rrdlabels_add(d->chart_labels, "device", name, RRDLABEL_SRC_AUTO);
if(likely(!d->virtual)) {
// set the filename to get the interface speed
- snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_speed, d->name);
- d->filename_speed = strdupz(buffer);
+ snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_speed, d->name);
+ d->filename_speed = strdupz(buf);
- snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_duplex, d->name);
- d->filename_duplex = strdupz(buffer);
+ snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_duplex, d->name);
+ d->filename_duplex = strdupz(buf);
}
- snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_operstate, d->name);
- d->filename_operstate = strdupz(buffer);
+ snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_operstate, d->name);
+ d->filename_operstate = strdupz(buf);
- snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_carrier, d->name);
- d->filename_carrier = strdupz(buffer);
+ snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_carrier, d->name);
+ d->filename_carrier = strdupz(buf);
- snprintfz(buffer, FILENAME_MAX, path_to_sys_class_net_mtu, d->name);
- d->filename_mtu = strdupz(buffer);
+ snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_mtu, d->name);
+ d->filename_mtu = strdupz(buf);
- snprintfz(buffer, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
- d->enabled = config_get_boolean_ondemand(buffer, "enabled", d->enabled);
- d->virtual = config_get_boolean(buffer, "virtual", d->virtual);
+ snprintfz(buf, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
+
+ if (config_exists(buf, "enabled"))
+ d->enabled = config_get_boolean_ondemand(buf, "enabled", d->enabled);
+ if (config_exists(buf, "virtual"))
+ d->virtual = config_get_boolean(buf, "virtual", d->virtual);
if(d->enabled == CONFIG_BOOLEAN_NO)
continue;
- d->do_bandwidth = config_get_boolean_ondemand(buffer, "bandwidth", do_bandwidth);
- d->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
- d->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
- d->do_drops = config_get_boolean_ondemand(buffer, "drops", do_drops);
- d->do_fifo = config_get_boolean_ondemand(buffer, "fifo", do_fifo);
- d->do_compressed = config_get_boolean_ondemand(buffer, "compressed", do_compressed);
- d->do_events = config_get_boolean_ondemand(buffer, "events", do_events);
- d->do_speed = config_get_boolean_ondemand(buffer, "speed", do_speed);
- d->do_duplex = config_get_boolean_ondemand(buffer, "duplex", do_duplex);
- d->do_operstate = config_get_boolean_ondemand(buffer, "operstate", do_operstate);
- d->do_carrier = config_get_boolean_ondemand(buffer, "carrier", do_carrier);
- d->do_mtu = config_get_boolean_ondemand(buffer, "mtu", do_mtu);
+ d->do_bandwidth = do_bandwidth;
+ d->do_packets = do_packets;
+ d->do_errors = do_errors;
+ d->do_drops = do_drops;
+ d->do_fifo = do_fifo;
+ d->do_compressed = do_compressed;
+ d->do_events = do_events;
+ d->do_speed = do_speed;
+ d->do_duplex = do_duplex;
+ d->do_operstate = do_operstate;
+ d->do_carrier = do_carrier;
+ d->do_mtu = do_mtu;
+
+ if (config_exists(buf, "bandwidth"))
+ d->do_bandwidth = config_get_boolean_ondemand(buf, "bandwidth", do_bandwidth);
+ if (config_exists(buf, "packets"))
+ d->do_packets = config_get_boolean_ondemand(buf, "packets", do_packets);
+ if (config_exists(buf, "errors"))
+ d->do_errors = config_get_boolean_ondemand(buf, "errors", do_errors);
+ if (config_exists(buf, "drops"))
+ d->do_drops = config_get_boolean_ondemand(buf, "drops", do_drops);
+ if (config_exists(buf, "fifo"))
+ d->do_fifo = config_get_boolean_ondemand(buf, "fifo", do_fifo);
+ if (config_exists(buf, "compressed"))
+ d->do_compressed = config_get_boolean_ondemand(buf, "compressed", do_compressed);
+ if (config_exists(buf, "events"))
+ d->do_events = config_get_boolean_ondemand(buf, "events", do_events);
+ if (config_exists(buf, "speed"))
+ d->do_speed = config_get_boolean_ondemand(buf, "speed", do_speed);
+ if (config_exists(buf, "duplex"))
+ d->do_duplex = config_get_boolean_ondemand(buf, "duplex", do_duplex);
+ if (config_exists(buf, "operstate"))
+ d->do_operstate = config_get_boolean_ondemand(buf, "operstate", do_operstate);
+ if (config_exists(buf, "carrier"))
+ d->do_carrier = config_get_boolean_ondemand(buf, "carrier", do_carrier);
+ if (config_exists(buf, "mtu"))
+ d->do_mtu = config_get_boolean_ondemand(buf, "mtu", do_mtu);
}
if(unlikely(!d->enabled))
@@ -1008,6 +1409,11 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
rrdset_done(d->st_bandwidth);
+ if(d->cgroup_netdev_link)
+ cgroup_netdev_add_bandwidth(d->cgroup_netdev_link,
+ d->flipped ? d->rd_tbytes->collector.last_stored_value : -d->rd_rbytes->collector.last_stored_value,
+ d->flipped ? -d->rd_rbytes->collector.last_stored_value : d->rd_tbytes->collector.last_stored_value);
+
// update the interface speed
if(d->filename_speed) {
if(unlikely(!d->chart_var_speed)) {
@@ -1462,6 +1868,8 @@ int do_proc_net_dev(int update_every, usec_t dt) {
rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier);
rrdset_done(d->st_events);
}
+
+ d->function_ready = true;
}
if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
@@ -1518,6 +1926,9 @@ void *netdev_main(void *ptr)
netdata_thread_cleanup_push(netdev_main_cleanup, ptr);
+ rrd_collector_started();
+ rrd_function_add(localhost, NULL, "network-interfaces", 10, RRDFUNCTIONS_NETDEV_HELP, true, netdev_function_net_interfaces, NULL);
+
usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
heartbeat_init(&hb);
@@ -1529,11 +1940,17 @@ void *netdev_main(void *ptr)
if (unlikely(!service_running(SERVICE_COLLECTORS)))
break;
+ cgroup_netdev_reset_all();
+
worker_is_busy(0);
+
+ netdata_mutex_lock(&netdev_dev_mutex);
if(do_proc_net_dev(localhost->rrd_update_every, hb_dt))
break;
+ netdata_mutex_unlock(&netdev_dev_mutex);
}
netdata_thread_cleanup_pop(1);
+
return NULL;
}
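
Besides the new "network-interfaces" table function and the netdev_dev_mutex that guards the device list, the proc_net_dev.c hunks change how per-interface options are read: each option starts from the global default, and the per-interface config key is consulted only if it already exists, so defaults are no longer materialized for every interface. The following is a small self-contained sketch of that gating pattern, with cfg_exists()/cfg_get_boolean() as toy stand-ins for the config_exists()/config_get_boolean_ondemand() calls used in the diff.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* toy stand-ins; the real calls keep the same (section, key[, default]) shape */
static bool cfg_exists(const char *section, const char *key) {
    (void)section;
    return strcmp(key, "bandwidth") == 0;   /* pretend only "bandwidth" was set by the user */
}

static int cfg_get_boolean(const char *section, const char *key, int def) {
    (void)section; (void)key;
    return def;
}

/* consult the per-interface section only when the key really exists,
   otherwise fall back to the global default untouched */
static int get_opt(const char *section, const char *key, int def) {
    return cfg_exists(section, key) ? cfg_get_boolean(section, key, def) : def;
}

int main(void) {
    const char *section = "plugin:proc:/proc/net/dev:eth0";
    printf("bandwidth=%d packets=%d\n",
           get_opt(section, "bandwidth", 1),
           get_opt(section, "packets", 1));
    return 0;
}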
diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
index dfd372b2a..2f01b8859 100644
--- a/collectors/proc.plugin/proc_net_softnet_stat.c
+++ b/collectors/proc.plugin/proc_net_softnet_stat.c
@@ -111,12 +111,12 @@ int do_proc_net_softnet_stat(int update_every, usec_t dt) {
if(do_per_core) {
for(l = 0; l < lines ;l++) {
char id[50+1];
- snprintfz(id, 50, "cpu%zu_softnet_stat", l);
+ snprintfz(id, sizeof(id) - 1, "cpu%zu_softnet_stat", l);
st = rrdset_find_active_bytype_localhost("cpu", id);
if(unlikely(!st)) {
char title[100+1];
- snprintfz(title, 100, "CPU softnet_stat");
+ snprintfz(title, sizeof(title) - 1, "CPU softnet_stat");
st = rrdset_create_localhost(
"cpu"
diff --git a/collectors/proc.plugin/proc_net_wireless.c b/collectors/proc.plugin/proc_net_wireless.c
index 08ab2eada..c7efa3335 100644
--- a/collectors/proc.plugin/proc_net_wireless.c
+++ b/collectors/proc.plugin/proc_net_wireless.c
@@ -85,12 +85,13 @@ static struct netwireless {
static void netwireless_free_st(struct netwireless *wireless_dev)
{
- if (wireless_dev->st_status) rrdset_is_obsolete(wireless_dev->st_status);
- if (wireless_dev->st_link) rrdset_is_obsolete(wireless_dev->st_link);
- if (wireless_dev->st_level) rrdset_is_obsolete(wireless_dev->st_level);
- if (wireless_dev->st_noise) rrdset_is_obsolete(wireless_dev->st_noise);
- if (wireless_dev->st_discarded_packets) rrdset_is_obsolete(wireless_dev->st_discarded_packets);
- if (wireless_dev->st_missed_beacon) rrdset_is_obsolete(wireless_dev->st_missed_beacon);
+ if (wireless_dev->st_status) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_status);
+ if (wireless_dev->st_link) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_link);
+ if (wireless_dev->st_level) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_level);
+ if (wireless_dev->st_noise) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_noise);
+ if (wireless_dev->st_discarded_packets)
+ rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_discarded_packets);
+ if (wireless_dev->st_missed_beacon) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_missed_beacon);
wireless_dev->st_status = NULL;
wireless_dev->st_link = NULL;
diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c
index e5318ce8f..fc5496c63 100644
--- a/collectors/proc.plugin/proc_pagetypeinfo.c
+++ b/collectors/proc.plugin/proc_pagetypeinfo.c
@@ -211,7 +211,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) {
);
for (o = 0; o < pageorders_cnt; o++) {
char id[3+1];
- snprintfz(id, 3, "%lu", o);
+ snprintfz(id, sizeof(id) - 1, "%lu", o);
char name[20+1];
dim_name(name, o, pagesize);
@@ -234,7 +234,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) {
// "pagetype Node" + NUMA-NodeId + ZoneName + TypeName
char setid[13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME+1];
- snprintfz(setid, 13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type);
+ snprintfz(setid, sizeof(setid) - 1, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type);
// Skip explicitly refused charts
if (simple_pattern_matches(filter_types, setid))
@@ -260,14 +260,14 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) {
);
char node[50+1];
- snprintfz(node, 50, "node%d", pgl->node);
+ snprintfz(node, sizeof(node) - 1, "node%d", pgl->node);
rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_id", node, RRDLABEL_SRC_AUTO);
rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_zone", pgl->zone, RRDLABEL_SRC_AUTO);
rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_type", pgl->type, RRDLABEL_SRC_AUTO);
for (o = 0; o < pageorders_cnt; o++) {
char dimid[3+1];
- snprintfz(dimid, 3, "%lu", o);
+ snprintfz(dimid, sizeof(dimid) - 1, "%lu", o);
char dimname[20+1];
dim_name(dimname, o, pagesize);
diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c
index ccf46cb8a..5f0502f66 100644
--- a/collectors/proc.plugin/proc_softirqs.c
+++ b/collectors/proc.plugin/proc_softirqs.c
@@ -197,10 +197,10 @@ int do_proc_softirqs(int update_every, usec_t dt) {
if (unlikely(core_sum == 0)) continue; // try next core
char id[50 + 1];
- snprintfz(id, 50, "cpu%d_softirqs", c);
+ snprintfz(id, sizeof(id) - 1, "cpu%d_softirqs", c);
char title[100 + 1];
- snprintfz(title, 100, "CPU softirqs");
+ snprintfz(title, sizeof(title) - 1, "CPU softirqs");
core_st[c] = rrdset_create_localhost(
"cpu"
@@ -218,7 +218,7 @@ int do_proc_softirqs(int update_every, usec_t dt) {
);
char core[50+1];
- snprintfz(core, 50, "cpu%d", c);
+ snprintfz(core, sizeof(core) - 1, "cpu%d", c);
rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO);
}
diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
index 428ef0d32..27178b60f 100644
--- a/collectors/proc.plugin/proc_spl_kstat_zfs.c
+++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c
@@ -240,7 +240,7 @@ DICTIONARY *zfs_pools = NULL;
void disable_zfs_pool_state(struct zfs_pool *pool)
{
if (pool->st)
- rrdset_is_obsolete(pool->st);
+ rrdset_is_obsolete___safe_from_collector_thread(pool->st);
pool->st = NULL;
@@ -335,7 +335,10 @@ int do_proc_spl_kstat_zfs_pool_state(int update_every, usec_t dt)
if (likely(do_zfs_pool_state)) {
DIR *dir = opendir(dirname);
if (unlikely(!dir)) {
- collector_error("Cannot read directory '%s'", dirname);
+ if (errno == ENOENT)
+ collector_info("Cannot read directory '%s'", dirname);
+ else
+ collector_error("Cannot read directory '%s'", dirname);
return 1;
}
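
The proc_spl_kstat_zfs.c hunk keeps the failure path but lowers its severity when the pool-state directory simply does not exist (no ZFS on the system). A self-contained sketch of the same errno check:

#include <dirent.h>
#include <errno.h>
#include <stdio.h>

/* ENOENT just means ZFS is not loaded, so it is reported at info level;
   anything else is still treated as an error, mirroring the hunk above */
static int open_zfs_pool_dir(const char *dirname) {
    DIR *dir = opendir(dirname);
    if (!dir) {
        if (errno == ENOENT)
            fprintf(stderr, "info: cannot read directory '%s'\n", dirname);
        else
            fprintf(stderr, "error: cannot read directory '%s'\n", dirname);
        return 1;
    }
    closedir(dir);
    return 0;
}

int main(void) {
    return open_zfs_pool_dir("/proc/spl/kstat/zfs");
}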
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
index a4f76796b..84160f22f 100644
--- a/collectors/proc.plugin/proc_stat.c
+++ b/collectors/proc.plugin/proc_stat.c
@@ -1038,7 +1038,7 @@ int do_proc_stat(int update_every, usec_t dt) {
);
char corebuf[50+1];
- snprintfz(corebuf, 50, "cpu%zu", core);
+ snprintfz(corebuf, sizeof(corebuf) - 1, "cpu%zu", core);
rrdlabels_add(cpuidle_charts[core].st->rrdlabels, "cpu", corebuf, RRDLABEL_SRC_AUTO);
char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1];
diff --git a/collectors/proc.plugin/sys_block_zram.c b/collectors/proc.plugin/sys_block_zram.c
index f9166ace0..dac7cac0f 100644
--- a/collectors/proc.plugin/sys_block_zram.c
+++ b/collectors/proc.plugin/sys_block_zram.c
@@ -3,7 +3,7 @@
#include "plugin_proc.h"
#define PLUGIN_PROC_MODULE_ZRAM_NAME "/sys/block/zram"
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete(st); (st) = NULL; } } while(st)
+#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
typedef struct mm_stat {
unsigned long long orig_data_size;
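
The sys_block_zram.c hunk only swaps the call inside the existing rrdset_obsolete_and_pointer_null() macro for the ___safe_from_collector_thread variant. For reference, a generic "mark obsolete and NULL the pointer" macro in the conventional do { ... } while (0) form, with obsolete_chart() as a hypothetical stand-in:

#include <stdio.h>

struct chart { const char *id; };

static void obsolete_chart(struct chart *st) {      /* hypothetical stand-in */
    printf("obsoleting %s\n", st->id);
}

/* the do { ... } while (0) wrapper makes the macro behave like a single
   statement, e.g. inside an un-braced if/else */
#define obsolete_and_null(st) do {        \
        if (st) {                         \
            obsolete_chart(st);           \
            (st) = NULL;                  \
        }                                 \
    } while (0)

int main(void) {
    struct chart c = { .id = "zram.usage" };
    struct chart *st = &c;
    obsolete_and_null(st);    /* st is NULL afterwards */
    obsolete_and_null(st);    /* safe to call again */
    return 0;
}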
diff --git a/collectors/proc.plugin/sys_class_drm.c b/collectors/proc.plugin/sys_class_drm.c
index 284662cf6..3ed1fb875 100644
--- a/collectors/proc.plugin/sys_class_drm.c
+++ b/collectors/proc.plugin/sys_class_drm.c
@@ -648,13 +648,17 @@ static int read_clk_freq_file(procfile **p_ff, const char *const pathname, colle
*p_ff = procfile_open(pathname, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
if(unlikely(!*p_ff)) return -2;
}
-
+
if(unlikely(NULL == (*p_ff = procfile_readall(*p_ff)))) return -3;
for(size_t l = 0; l < procfile_lines(*p_ff) ; l++) {
+ char *str_with_units = NULL;
+ if((*p_ff)->lines->lines[l].words >= 3 && !strcmp(procfile_lineword((*p_ff), l, 2), "*")) //format: X: collected_number *
+ str_with_units = procfile_lineword((*p_ff), l, 1);
+ else if ((*p_ff)->lines->lines[l].words == 2 && !strcmp(procfile_lineword((*p_ff), l, 1), "*")) //format: collected_number *
+ str_with_units = procfile_lineword((*p_ff), l, 0);
- if((*p_ff)->lines->lines[l].words >= 3 && !strcmp(procfile_lineword((*p_ff), l, 2), "*")){
- char *str_with_units = procfile_lineword((*p_ff), l, 1);
+ if (str_with_units) {
char *delim = strchr(str_with_units, 'M');
char str_without_units[10];
memcpy(str_without_units, str_with_units, delim - str_with_units);
@@ -707,7 +711,7 @@ static int do_rrd_util_gpu(struct card *const c){
else {
collector_error("Cannot read util_gpu for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_util_gpu);
- rrdset_is_obsolete(c->st_util_gpu);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_util_gpu);
return 1;
}
}
@@ -721,7 +725,7 @@ static int do_rrd_util_mem(struct card *const c){
else {
collector_error("Cannot read util_mem for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_util_mem);
- rrdset_is_obsolete(c->st_util_mem);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_util_mem);
return 1;
}
}
@@ -735,7 +739,7 @@ static int do_rrd_clk_gpu(struct card *const c){
else {
collector_error("Cannot read clk_gpu for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_clk_gpu);
- rrdset_is_obsolete(c->st_clk_gpu);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_gpu);
return 1;
}
}
@@ -749,7 +753,7 @@ static int do_rrd_clk_mem(struct card *const c){
else {
collector_error("Cannot read clk_mem for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_clk_mem);
- rrdset_is_obsolete(c->st_clk_mem);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_mem);
return 1;
}
}
@@ -771,8 +775,8 @@ static int do_rrd_vram(struct card *const c){
collector_error("Cannot read used_vram for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_mem_used_vram);
freez((void *) c->pathname_mem_total_vram);
- rrdset_is_obsolete(c->st_mem_usage_perc_vram);
- rrdset_is_obsolete(c->st_mem_usage_vram);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vram);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vram);
return 1;
}
}
@@ -794,8 +798,8 @@ static int do_rrd_vis_vram(struct card *const c){
collector_error("Cannot read used_vis_vram for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_mem_used_vis_vram);
freez((void *) c->pathname_mem_total_vis_vram);
- rrdset_is_obsolete(c->st_mem_usage_perc_vis_vram);
- rrdset_is_obsolete(c->st_mem_usage_vis_vram);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vis_vram);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vis_vram);
return 1;
}
}
@@ -817,8 +821,8 @@ static int do_rrd_gtt(struct card *const c){
collector_error("Cannot read used_gtt for %s: [%s]", c->pathname, c->id.marketing_name);
freez((void *) c->pathname_mem_used_gtt);
freez((void *) c->pathname_mem_total_gtt);
- rrdset_is_obsolete(c->st_mem_usage_perc_gtt);
- rrdset_is_obsolete(c->st_mem_usage_gtt);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_gtt);
+ rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_gtt);
return 1;
}
}
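
The read_clk_freq_file() change in sys_class_drm.c accepts two sysfs layouts for the active clock line: "index: <freq>Mhz *" and "<freq>Mhz *". A simplified, self-contained parser for the same two shapes (not the collector's actual procfile-based code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* parse either "1: 1600Mhz *" or "1600Mhz *" and return the MHz value,
   or -1 when the line is not the active ("*") entry */
static long parse_active_clock(const char *line) {
    char a[32], b[32], c[32];
    int n = sscanf(line, "%31s %31s %31s", a, b, c);

    const char *with_units = NULL;
    if (n == 3 && strcmp(c, "*") == 0)
        with_units = b;    /* "index: freq *" */
    else if (n == 2 && strcmp(b, "*") == 0)
        with_units = a;    /* "freq *" */

    if (!with_units)
        return -1;

    return strtol(with_units, NULL, 10);   /* strtol stops at the 'M' of "Mhz" */
}

int main(void) {
    printf("%ld\n", parse_active_clock("1: 1600Mhz *"));   /* 1600 */
    printf("%ld\n", parse_active_clock("300Mhz"));         /* -1, not the active line */
    return 0;
}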
diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c
index 8687ecb55..3f793b9c6 100644
--- a/collectors/proc.plugin/sys_class_power_supply.c
+++ b/collectors/proc.plugin/sys_class_power_supply.c
@@ -65,7 +65,7 @@ void power_supply_free(struct power_supply *ps) {
// free capacity structure
if(likely(ps->capacity)) {
- if(likely(ps->capacity->st)) rrdset_is_obsolete(ps->capacity->st);
+ if(likely(ps->capacity->st)) rrdset_is_obsolete___safe_from_collector_thread(ps->capacity->st);
freez(ps->capacity->filename);
if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd);
files_num--;
@@ -89,7 +89,7 @@ void power_supply_free(struct power_supply *ps) {
}
// free properties
- if(likely(pr->st)) rrdset_is_obsolete(pr->st);
+ if(likely(pr->st)) rrdset_is_obsolete___safe_from_collector_thread(pr->st);
freez(pr->name);
freez(pr->title);
freez(pr->units);
diff --git a/collectors/proc.plugin/sys_devices_pci_aer.c b/collectors/proc.plugin/sys_devices_pci_aer.c
index 296195182..563ebf051 100644
--- a/collectors/proc.plugin/sys_devices_pci_aer.c
+++ b/collectors/proc.plugin/sys_devices_pci_aer.c
@@ -240,8 +240,8 @@ int do_proc_sys_devices_pci_aer(int update_every, usec_t dt __maybe_unused) {
continue;
if(!a->st) {
- const char *title;
- const char *context;
+ const char *title = "";
+ const char *context = "";
switch(a->type) {
case AER_DEV_NONFATAL:
diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
index da89411bd..f1d6fe720 100644
--- a/collectors/proc.plugin/sys_fs_btrfs.c
+++ b/collectors/proc.plugin/sys_fs_btrfs.c
@@ -196,8 +196,8 @@ static inline int collect_btrfs_commits_stats(BTRFS_NODE *node, int update_every
static inline void btrfs_free_commits_stats(BTRFS_NODE *node){
if(node->st_commits){
- rrdset_is_obsolete(node->st_commits);
- rrdset_is_obsolete(node->st_commit_timings);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_commits);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_commit_timings);
}
freez(node->commit_stats_filename);
node->commit_stats_filename = NULL;
@@ -211,7 +211,7 @@ static inline void btrfs_free_disk(BTRFS_DISK *d) {
static inline void btrfs_free_device(BTRFS_DEVICE *d) {
if(d->st_error_stats)
- rrdset_is_obsolete(d->st_error_stats);
+ rrdset_is_obsolete___safe_from_collector_thread(d->st_error_stats);
freez(d->error_stats_filename);
freez(d);
}
@@ -220,16 +220,16 @@ static inline void btrfs_free_node(BTRFS_NODE *node) {
// collector_info("BTRFS: destroying '%s'", node->id);
if(node->st_allocation_disks)
- rrdset_is_obsolete(node->st_allocation_disks);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_disks);
if(node->st_allocation_data)
- rrdset_is_obsolete(node->st_allocation_data);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_data);
if(node->st_allocation_metadata)
- rrdset_is_obsolete(node->st_allocation_metadata);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_metadata);
if(node->st_allocation_system)
- rrdset_is_obsolete(node->st_allocation_system);
+ rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_system);
freez(node->allocation_data_bytes_used_filename);
freez(node->allocation_data_total_bytes_filename);
@@ -392,14 +392,14 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) {
continue;
}
- collector_info("BTRFS: device found '%s'", de->d_name);
+ // internal_error("BTRFS: device found '%s'", de->d_name);
// --------------------------------------------------------------------
// search for it
for(d = node->devices ; d ; d = d->next) {
if(str2ll(de->d_name, NULL) == d->id){
- collector_info("BTRFS: existing device id '%d'", d->id);
+ // collector_info("BTRFS: existing device id '%d'", d->id);
break;
}
}
@@ -411,11 +411,11 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) {
d = callocz(sizeof(BTRFS_DEVICE), 1);
d->id = str2ll(de->d_name, NULL);
- collector_info("BTRFS: new device with id '%d'", d->id);
+ // collector_info("BTRFS: new device with id '%d'", d->id);
snprintfz(filename, FILENAME_MAX, "%s/%d/error_stats", path, d->id);
d->error_stats_filename = strdupz(filename);
- collector_info("BTRFS: error_stats_filename '%s'", filename);
+ // collector_info("BTRFS: error_stats_filename '%s'", filename);
// link it
d->next = node->devices;
@@ -795,7 +795,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label);
- snprintfz(title, 200, "BTRFS Physical Disk Allocation");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Physical Disk Allocation");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -854,7 +854,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "data_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "data_%s", node->label);
- snprintfz(title, 200, "BTRFS Data Allocation");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Data Allocation");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -898,7 +898,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label);
- snprintfz(title, 200, "BTRFS Metadata Allocation");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Metadata Allocation");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -944,7 +944,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "system_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "system_%s", node->label);
- snprintfz(title, 200, "BTRFS System Allocation");
+ snprintfz(title, sizeof(title) - 1, "BTRFS System Allocation");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -988,7 +988,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "commits_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "commits_%s", node->label);
- snprintfz(title, 200, "BTRFS Commits");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Commits");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -1021,7 +1021,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->label);
- snprintfz(title, 200, "BTRFS Commits Time Share");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Commits Time Share");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -1055,7 +1055,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->label);
- snprintfz(title, 200, "BTRFS Commit Timings");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Commit Timings");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);
@@ -1101,7 +1101,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(id, RRD_ID_LENGTH_MAX, "device_errors_dev%d_%s", d->id, node->id);
snprintfz(name, RRD_ID_LENGTH_MAX, "device_errors_dev%d_%s", d->id, node->label);
- snprintfz(title, 200, "BTRFS Device Errors");
+ snprintfz(title, sizeof(title) - 1, "BTRFS Device Errors");
netdata_fix_chart_id(id);
netdata_fix_chart_name(name);