author    Daniel Baumann <daniel.baumann@progress-linux.org> 2023-10-17 09:30:20 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2023-10-17 09:30:20 +0000
commit    386ccdd61e8256c8b21ee27ee2fc12438fc5ca98 (patch)
tree      c9fbcacdb01f029f46133a5ba7ecd610c2bcb041 /collectors/proc.plugin
parent    Adding upstream version 1.42.4. (diff)
download  netdata-386ccdd61e8256c8b21ee27ee2fc12438fc5ca98.tar.xz
          netdata-386ccdd61e8256c8b21ee27ee2fc12438fc5ca98.zip

Adding upstream version 1.43.0. (upstream/1.43.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/proc.plugin')
-rw-r--r-- collectors/proc.plugin/README.md | 6
-rw-r--r-- collectors/proc.plugin/integrations/amd_gpu.md | 109
-rw-r--r-- collectors/proc.plugin/integrations/btrfs.md | 136
-rw-r--r-- collectors/proc.plugin/integrations/conntrack.md | 104
-rw-r--r-- collectors/proc.plugin/integrations/disk_statistics.md | 148
-rw-r--r-- collectors/proc.plugin/integrations/entropy.md | 132
-rw-r--r-- collectors/proc.plugin/integrations/infiniband.md | 98
-rw-r--r-- collectors/proc.plugin/integrations/inter_process_communication.md | 119
-rw-r--r-- collectors/proc.plugin/integrations/interrupts.md | 140
-rw-r--r-- collectors/proc.plugin/integrations/ip_virtual_server.md | 96
-rw-r--r-- collectors/proc.plugin/integrations/ipv6_socket_statistics.md | 98
-rw-r--r-- collectors/proc.plugin/integrations/kernel_same-page_merging.md | 102
-rw-r--r-- collectors/proc.plugin/integrations/md_raid.md | 124
-rw-r--r-- collectors/proc.plugin/integrations/memory_modules_dimms.md | 145
-rw-r--r-- collectors/proc.plugin/integrations/memory_statistics.md | 137
-rw-r--r-- collectors/proc.plugin/integrations/memory_usage.md | 134
-rw-r--r-- collectors/proc.plugin/integrations/network_interfaces.md | 136
-rw-r--r-- collectors/proc.plugin/integrations/network_statistics.md | 160
-rw-r--r-- collectors/proc.plugin/integrations/nfs_client.md | 98
-rw-r--r-- collectors/proc.plugin/integrations/nfs_server.md | 103
-rw-r--r-- collectors/proc.plugin/integrations/non-uniform_memory_access.md | 110
-rw-r--r-- collectors/proc.plugin/integrations/page_types.md | 112
-rw-r--r-- collectors/proc.plugin/integrations/power_supply.md | 106
-rw-r--r-- collectors/proc.plugin/integrations/pressure_stall_information.md | 128
-rw-r--r-- collectors/proc.plugin/integrations/sctp_statistics.md | 98
-rw-r--r-- collectors/proc.plugin/integrations/socket_statistics.md | 108
-rw-r--r-- collectors/proc.plugin/integrations/softirq_statistics.md | 132
-rw-r--r-- collectors/proc.plugin/integrations/softnet_statistics.md | 134
-rw-r--r-- collectors/proc.plugin/integrations/synproxy.md | 96
-rw-r--r-- collectors/proc.plugin/integrations/system_load_average.md | 127
-rw-r--r-- collectors/proc.plugin/integrations/system_statistics.md | 168
-rw-r--r-- collectors/proc.plugin/integrations/system_uptime.md | 107
-rw-r--r-- collectors/proc.plugin/integrations/wireless_network_interfaces.md | 99
-rw-r--r-- collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md | 124
-rw-r--r-- collectors/proc.plugin/integrations/zfs_pools.md | 104
-rw-r--r-- collectors/proc.plugin/integrations/zram.md | 105
-rw-r--r-- collectors/proc.plugin/metadata.yaml | 322
-rw-r--r-- collectors/proc.plugin/plugin_proc.h | 2
-rw-r--r-- collectors/proc.plugin/proc_diskstats.c | 129
-rw-r--r-- collectors/proc.plugin/proc_net_dev.c | 6
-rw-r--r-- collectors/proc.plugin/proc_net_netstat.c | 206
-rw-r--r-- collectors/proc.plugin/proc_net_sockstat.c | 22
-rw-r--r-- collectors/proc.plugin/proc_net_sockstat6.c | 10
-rw-r--r-- collectors/proc.plugin/sys_devices_pci_aer.c | 5
-rw-r--r-- collectors/proc.plugin/sys_devices_system_edac_mc.c | 12
45 files changed, 4581 insertions, 316 deletions
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
index 16ae6f412..62e46569f 100644
--- a/collectors/proc.plugin/README.md
+++ b/collectors/proc.plugin/README.md
@@ -398,11 +398,11 @@ You can set the following values for each configuration option:
#### Wireless configuration
-#### alarms
+#### alerts
-There are several alarms defined in `health.d/net.conf`.
+There are several alerts defined in `health.d/net.conf`.
-The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alarms can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alarm with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-families) line in the alarm configuration. For example, if you want to disable the `inbound packets dropped` alarm for `eth0`, set `families: !eth0 *` in the alarm definition for `template: inbound_packets_dropped`.
+The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alert with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alert-line-families) line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`.
#### configuration
diff --git a/collectors/proc.plugin/integrations/amd_gpu.md b/collectors/proc.plugin/integrations/amd_gpu.md
new file mode 100644
index 000000000..c9964dbb7
--- /dev/null
+++ b/collectors/proc.plugin/integrations/amd_gpu.md
@@ -0,0 +1,109 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/amd_gpu.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "AMD GPU"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AMD GPU
+
+
+<img src="https://netdata.cloud/img/amd.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/class/drm
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.
+
+It reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per gpu
+
+These metrics refer to the GPU.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| product_name | GPU product name (e.g. AMD RX 6600) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| amdgpu.gpu_utilization | utilization | percentage |
+| amdgpu.gpu_mem_utilization | utilization | percentage |
+| amdgpu.gpu_clk_frequency | frequency | MHz |
+| amdgpu.gpu_mem_clk_frequency | frequency | MHz |
+| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |
+| amdgpu.gpu_mem_vram_usage | free, used | bytes |
+| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |
+| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |
+| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |
+| amdgpu.gpu_mem_gtt_usage | free, used | bytes |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
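For illustration only (not part of this patch): a minimal Python sketch of how such metrics can be read from sysfs. `card0` and the amdgpu attribute names (`gpu_busy_percent`, `mem_info_vram_*`) are assumptions for the example; a real collector would enumerate cards.

```python
# Hedged sketch: read amdgpu utilization and VRAM usage from sysfs.
# Assumes the amdgpu driver and a single card at card0.
from pathlib import Path

def read_int(path: Path) -> int:
    return int(path.read_text().strip())

card = Path("/sys/class/drm/card0/device")
busy = read_int(card / "gpu_busy_percent")           # GPU utilization, %
vram_used = read_int(card / "mem_info_vram_used")    # bytes
vram_total = read_int(card / "mem_info_vram_total")  # bytes

print(f"utilization: {busy}%")
print(f"vram: {vram_used / 2**20:.1f} / {vram_total / 2**20:.1f} MiB "
      f"({100 * vram_used / vram_total:.1f}%)")
```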
diff --git a/collectors/proc.plugin/integrations/btrfs.md b/collectors/proc.plugin/integrations/btrfs.md
new file mode 100644
index 000000000..7c0764cf0
--- /dev/null
+++ b/collectors/proc.plugin/integrations/btrfs.md
@@ -0,0 +1,136 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/btrfs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "BTRFS"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Filesystem/BTRFS"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# BTRFS
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/fs/btrfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides usage and error statistics from the BTRFS filesystem.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per btrfs filesystem
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| filesystem_uuid | TBD |
+| filesystem_label | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |
+| btrfs.data | free, used | MiB |
+| btrfs.metadata | free, used, reserved | MiB |
+| btrfs.system | free, used | MiB |
+| btrfs.commits | commits | commits |
+| btrfs.commits_perc_time | commits | percentage |
+| btrfs.commit_timings | last, max | ms |
+
+### Per btrfs device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device_id | TBD |
+| filesystem_uuid | TBD |
+| filesystem_label | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |
+| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |
+| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |
+| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |
+| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |
+| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |
+| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |
+| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |
+| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
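As an illustrative aside (not part of this patch): btrfs exposes per-filesystem allocation counters under `/sys/fs/btrfs/<UUID>/allocation/`, which is enough to reproduce the `btrfs.data` free/used split above. A hedged Python sketch:

```python
# Hedged sketch: report data-allocation usage for every mounted btrfs
# filesystem, assuming the sysfs layout /sys/fs/btrfs/<UUID>/allocation/data.
from pathlib import Path

for fs in Path("/sys/fs/btrfs").glob("*-*"):  # filesystem UUID directories
    data = fs / "allocation" / "data"
    total = int((data / "total_bytes").read_text())
    used = int((data / "bytes_used").read_text())
    print(f"{fs.name}: data used {used / 2**20:.0f} MiB, "
          f"free {(total - used) / 2**20:.0f} MiB")
```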
diff --git a/collectors/proc.plugin/integrations/conntrack.md b/collectors/proc.plugin/integrations/conntrack.md
new file mode 100644
index 000000000..543aafc16
--- /dev/null
+++ b/collectors/proc.plugin/integrations/conntrack.md
@@ -0,0 +1,104 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/conntrack.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Conntrack"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Firewall"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Conntrack
+
+
+<img src="https://netdata.cloud/img/firewall.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/stat/nf_conntrack
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Conntrack instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netfilter.conntrack_sockets | connections | active connections |
+| netfilter.conntrack_new | new, ignore, invalid | connections/s |
+| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |
+| netfilter.conntrack_expect | created, deleted, new | expectations/s |
+| netfilter.conntrack_search | searched, restarted, found | searches/s |
+| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
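An illustrative sketch, not taken from the patch: `/proc/net/stat/nf_conntrack` has one header row plus one row of hexadecimal counters per CPU, and the column set varies by kernel version, so a parser should read the header rather than assume it:

```python
# Hedged sketch: parse the per-CPU hexadecimal conntrack counters.
rows = open("/proc/net/stat/nf_conntrack").read().splitlines()
header = rows[0].split()
per_cpu = [dict(zip(header, (int(v, 16) for v in line.split())))
           for line in rows[1:]]

entries = per_cpu[0]["entries"]  # total entries, repeated on every CPU row
found = sum(cpu.get("found", 0) for cpu in per_cpu)
print(f"tracked connections: {entries}, lookups found so far: {found}")
```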
diff --git a/collectors/proc.plugin/integrations/disk_statistics.md b/collectors/proc.plugin/integrations/disk_statistics.md
new file mode 100644
index 000000000..fc2ce5b08
--- /dev/null
+++ b/collectors/proc.plugin/integrations/disk_statistics.md
@@ -0,0 +1,148 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/disk_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Disk Statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Disk"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Disk Statistics
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/diskstats
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Detailed statistics for each of your system's disk devices and partitions.
+The data is reported by the kernel and can be used to monitor disk activity on a Linux system.
+
+Get valuable insight into how your disks are performing and where potential bottlenecks might be.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Disk Statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.io | in, out | KiB/s |
+
+### Per disk
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+| mount_point | TBD |
+| device_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| disk.io | reads, writes | KiB/s |
+| disk_ext.io | discards | KiB/s |
+| disk.ops | reads, writes | operations/s |
+| disk_ext.ops | discards, flushes | operations/s |
+| disk.qops | operations | operations |
+| disk.backlog | backlog | milliseconds |
+| disk.busy | busy | milliseconds |
+| disk.util | utilization | % of time working |
+| disk.mops | reads, writes | merged operations/s |
+| disk_ext.mops | discards | merged operations/s |
+| disk.iotime | reads, writes | milliseconds/s |
+| disk_ext.iotime | discards, flushes | milliseconds/s |
+| disk.await | reads, writes | milliseconds/operation |
+| disk_ext.await | discards, flushes | milliseconds/operation |
+| disk.avgsz | reads, writes | KiB/operation |
+| disk_ext.avgsz | discards | KiB/operation |
+| disk.svctm | svctm | milliseconds/operation |
+| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |
+| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |
+| disk.bcache_rates | congested, writeback | KiB/s |
+| disk.bcache_size | dirty | MiB |
+| disk.bcache_usage | avail | percentage |
+| disk.bcache_cache_read_races | races, errors | operations/s |
+| disk.bcache | hits, misses, collisions, readaheads | operations/s |
+| disk.bcache_bypass | hits, misses | operations/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |
+| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |
+| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |
+| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
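For illustration (not part of this patch): `disk.io` above is a rate, so a collector samples `/proc/diskstats` twice and divides the delta by the interval. Sector counts in this file are always in 512-byte units. A hedged sketch, with `sda` as an assumed device name:

```python
# Hedged sketch: compute read/write throughput for one disk from two
# samples of /proc/diskstats (fields 6 and 10 are sectors read/written).
import time

def sectors(device: str) -> tuple[int, int]:
    for line in open("/proc/diskstats"):
        f = line.split()
        if f[2] == device:
            return int(f[5]), int(f[9])   # sectors read, sectors written
    raise KeyError(device)

dev = "sda"                               # assumption: adjust to your disk
r0, w0 = sectors(dev)
time.sleep(1)
r1, w1 = sectors(dev)
print(f"{dev}: read {(r1 - r0) * 512 / 1024:.1f} KiB/s, "
      f"write {(w1 - w0) * 512 / 1024:.1f} KiB/s")
```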
diff --git a/collectors/proc.plugin/integrations/entropy.md b/collectors/proc.plugin/integrations/entropy.md
new file mode 100644
index 000000000..debf2e75e
--- /dev/null
+++ b/collectors/proc.plugin/integrations/entropy.md
@@ -0,0 +1,132 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/entropy.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Entropy"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/System"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Entropy
+
+
+<img src="https://netdata.cloud/img/syslog.png" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/sys/kernel/random/entropy_avail
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Entropy is a measure of the randomness or unpredictability of data.
+
+In the context of cryptography, entropy is used to generate random numbers or keys that are essential for
+secure communication and encryption. Without a good source of entropy, cryptographic protocols can become
+vulnerable to attacks that exploit the predictability of the generated keys.
+
+In most operating systems, entropy is generated by collecting random events from various sources, such as
+hardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool
+of entropy, which is then used to generate random numbers when needed.
+
+The `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs
+to access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,
+which blocks until enough entropy is available to generate the requested numbers. This ensures that the
+generated numbers are truly random and not predictable.
+
+However, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing
+programs that rely on random numbers to slow down or even freeze. This is especially problematic for
+cryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.
+
+To avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality
+entropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or
+radioactive decay. These sources of randomness are considered to be more reliable and unpredictable than
+software-based sources.
+
+One such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used
+for cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates
+high-quality entropy, which can be used to seed the pool of entropy in the operating system.
+
+Alternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by
+exploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions
+can help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.
+
+
+
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Entropy instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.entropy | entropy | entropy |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel’s random number generator |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
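For illustration (not part of this patch): the entire data source for this integration is a single file holding the number of available entropy bits, so collection is one read:

```python
# Hedged sketch: read the kernel's available-entropy estimate.
# Note: since the 5.18 random-subsystem rework this typically reads 256.
with open("/proc/sys/kernel/random/entropy_avail") as f:
    bits = int(f.read())
print(f"available entropy: {bits} bits")
```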
diff --git a/collectors/proc.plugin/integrations/infiniband.md b/collectors/proc.plugin/integrations/infiniband.md
new file mode 100644
index 000000000..6ebefe73e
--- /dev/null
+++ b/collectors/proc.plugin/integrations/infiniband.md
@@ -0,0 +1,98 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/infiniband.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "InfiniBand"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# InfiniBand
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/class/infiniband
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors InfiniBand network interface statistics.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per infiniband port
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ib.bytes | Received, Sent | kilobits/s |
+| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |
+| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |
+| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |
+| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
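An illustrative sketch, not taken from the patch: per-port counters live under `/sys/class/infiniband/<device>/ports/<port>/counters/`, and the `*_data` counters are in 4-byte units, hence the multiplication below:

```python
# Hedged sketch: read InfiniBand traffic counters for every port.
from pathlib import Path

for port in Path("/sys/class/infiniband").glob("*/ports/*"):
    counters = port / "counters"
    rcv = int((counters / "port_rcv_data").read_text()) * 4    # octets
    xmit = int((counters / "port_xmit_data").read_text()) * 4  # octets
    print(f"{port}: received {rcv} bytes, sent {xmit} bytes")
```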
diff --git a/collectors/proc.plugin/integrations/inter_process_communication.md b/collectors/proc.plugin/integrations/inter_process_communication.md
new file mode 100644
index 000000000..b36b02d3b
--- /dev/null
+++ b/collectors/proc.plugin/integrations/inter_process_communication.md
@@ -0,0 +1,119 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/inter_process_communication.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Inter Process Communication"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/IPC"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Inter Process Communication
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: ipc
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+IPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each
+other and synchronize their actions.
+
+This collector exposes information about:
+
+- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that
+ allows messages to be placed onto a queue and read at a later time.
+
+- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by
+ reading/writing into shared memory segments.
+
+- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple
+ processes are trying to access a single shared resource, semaphores can ensure that only one process
+ accesses the resource at a given time.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Inter Process Communication instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.ipc_semaphores | semaphores | semaphores |
+| system.ipc_semaphore_arrays | arrays | arrays |
+| system.message_queue_message | a dimension per queue | messages |
+| system.message_queue_bytes | a dimension per queue | bytes |
+| system.shared_memory_segments | segments | segments |
+| system.shared_memory_bytes | bytes | bytes |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |
+| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
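For illustration (not part of this patch): the kernel publishes tables of SysV IPC objects under `/proc/sysvipc/`; counting rows and summing the `size` column reproduces the shared-memory charts above, and `msg` and `sem` have analogous tables. A hedged sketch:

```python
# Hedged sketch: summarize SysV shared-memory segments from /proc/sysvipc/shm.
def sysvipc(table: str) -> list[dict]:
    with open(f"/proc/sysvipc/{table}") as f:
        header = f.readline().split()
        return [dict(zip(header, line.split())) for line in f]

segments = sysvipc("shm")
total_bytes = sum(int(seg["size"]) for seg in segments)
print(f"shared memory: {len(segments)} segments, {total_bytes} bytes")
```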
diff --git a/collectors/proc.plugin/integrations/interrupts.md b/collectors/proc.plugin/integrations/interrupts.md
new file mode 100644
index 000000000..756324163
--- /dev/null
+++ b/collectors/proc.plugin/integrations/interrupts.md
@@ -0,0 +1,140 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/interrupts.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Interrupts"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/CPU"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Interrupts
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/interrupts
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.
+The numbers reported are the counts of the interrupts that have occurred of each type.
+
+An interrupt is a signal to the processor emitted by hardware or software indicating an event that needs
+immediate attention. The processor then interrupts its current activities and executes the interrupt handler
+to deal with the event. This is part of the way a computer multitasks and handles concurrent processing.
+
+The types of interrupts include:
+
+- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when
+ you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.
+
+- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily
+ used to switch the CPU among different tasks.
+
+- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.
+
+- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.
+
+Monitoring `/proc/interrupts` can be used for:
+
+- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not
+ configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system
+ performance degradation.
+
+- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.
+
+- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you
+ understand what your system is doing. It can provide insights into the system's interaction with hardware,
+ drivers, and other parts of the kernel.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Interrupts instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.interrupts | a dimension per device | interrupts/s |
+
+### Per cpu core
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cpu | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cpu.interrupts | a dimension per device | interrupts/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
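For illustration (not part of this patch): each row of `/proc/interrupts` is an interrupt source and each numeric column a CPU; summing the columns gives the per-device totals behind `system.interrupts`, which the collector then turns into rates. A hedged sketch:

```python
# Hedged sketch: total interrupts per source across all CPUs.
with open("/proc/interrupts") as f:
    cpus = f.readline().split()            # header row: CPU0 CPU1 ...
    for line in f:
        fields = line.split()
        name = fields[0].rstrip(":")
        # Rows like ERR/MIS have fewer columns, so filter to numeric cells.
        counts = [int(v) for v in fields[1:len(cpus) + 1] if v.isdigit()]
        print(name, sum(counts))
```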
diff --git a/collectors/proc.plugin/integrations/ip_virtual_server.md b/collectors/proc.plugin/integrations/ip_virtual_server.md
new file mode 100644
index 000000000..22f43544e
--- /dev/null
+++ b/collectors/proc.plugin/integrations/ip_virtual_server.md
@@ -0,0 +1,96 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/ip_virtual_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "IP Virtual Server"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IP Virtual Server
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/ip_vs_stats
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors IP Virtual Server statistics.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per IP Virtual Server instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ipvs.sockets | connections | connections/s |
+| ipvs.packets | received, sent | packets/s |
+| ipvs.net | received, sent | kilobits/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
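An illustrative sketch, assuming the usual file layout (not part of this patch): `/proc/net/ip_vs_stats` carries cumulative totals in hexadecimal on its third line (connections, packets in/out, bytes in/out):

```python
# Hedged sketch: read IPVS cumulative totals; values are hexadecimal.
lines = open("/proc/net/ip_vs_stats").read().splitlines()
conns, in_pkts, out_pkts, in_bytes, out_bytes = (
    int(v, 16) for v in lines[2].split())
print(f"{conns} connections, {in_pkts} packets in, {out_pkts} packets out")
```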
diff --git a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
new file mode 100644
index 000000000..bf0fbaa00
--- /dev/null
+++ b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
@@ -0,0 +1,98 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/ipv6_socket_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "IPv6 Socket Statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IPv6 Socket Statistics
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/sockstat6
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides IPv6 socket statistics.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per IPv6 Socket Statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ipv6.sockstat6_tcp_sockets | inuse | sockets |
+| ipv6.sockstat6_udp_sockets | inuse | sockets |
+| ipv6.sockstat6_udplite_sockets | inuse | sockets |
+| ipv6.sockstat6_raw_sockets | inuse | sockets |
+| ipv6.sockstat6_frag_sockets | inuse | fragments |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
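For illustration (not part of this patch): `/proc/net/sockstat6` is a short `PROTO: key value ...` listing, so parsing it into a dictionary is the whole job:

```python
# Hedged sketch: parse /proc/net/sockstat6 into {proto: {key: value}}.
stats = {}
for line in open("/proc/net/sockstat6"):
    proto, rest = line.split(":", 1)
    fields = rest.split()
    stats[proto] = dict(zip(fields[::2], (int(v) for v in fields[1::2])))
print(stats["TCP6"]["inuse"], "IPv6 TCP sockets in use")
```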
diff --git a/collectors/proc.plugin/integrations/kernel_same-page_merging.md b/collectors/proc.plugin/integrations/kernel_same-page_merging.md
new file mode 100644
index 000000000..bed7891bd
--- /dev/null
+++ b/collectors/proc.plugin/integrations/kernel_same-page_merging.md
@@ -0,0 +1,102 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/kernel_same-page_merging.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Kernel Same-Page Merging"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kernel Same-Page Merging
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/kernel/mm/ksm
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Kernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the
+memory of different processes and identify identical pages. It then merges these identical pages into a
+single page that the processes share. This is particularly useful for virtualization, where multiple virtual
+machines might be running the same operating system or applications and have many identical pages.
+
+The collector provides information about the operation and effectiveness of KSM on your system.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Kernel Same-Page Merging instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.ksm | shared, unshared, sharing, volatile | MiB |
+| mem.ksm_savings | savings, offered | MiB |
+| mem.ksm_ratios | savings | percentage |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
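An illustrative sketch (not part of this patch): KSM exports plain page counters in sysfs, and multiplying `pages_sharing` by the page size gives a rough figure for the savings behind `mem.ksm_savings`:

```python
# Hedged sketch: estimate memory saved by KSM from its sysfs counters.
import os
from pathlib import Path

ksm = Path("/sys/kernel/mm/ksm")
page = os.sysconf("SC_PAGE_SIZE")
shared = int((ksm / "pages_shared").read_text())    # deduplicated pages kept
sharing = int((ksm / "pages_sharing").read_text())  # pages mapped onto them

print(f"KSM saves roughly {sharing * page / 2**20:.1f} MiB "
      f"({sharing} pages merged into {shared})")
```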
diff --git a/collectors/proc.plugin/integrations/md_raid.md b/collectors/proc.plugin/integrations/md_raid.md
new file mode 100644
index 000000000..ef78b8269
--- /dev/null
+++ b/collectors/proc.plugin/integrations/md_raid.md
@@ -0,0 +1,124 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/md_raid.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "MD RAID"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Disk"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MD RAID
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/mdstat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors the status of MD RAID devices.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per MD RAID instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| md.health | a dimension per md array | failed disks |
+
+### Per md array
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+| raid_level | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| md.disks | inuse, down | disks |
+| md.mismatch_cnt | count | unsynchronized blocks |
+| md.status | check, resync, recovery, reshape | percent |
+| md.expected_time_until_operation_finish | finish_in | seconds |
+| md.operation_speed | speed | KiB/s |
+| md.nonredundant | available | boolean |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |
+| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |
+| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |
+| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
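For illustration (not part of this patch): the `[n/m] [UU_]` token in `/proc/mdstat` encodes configured versus in-use members per array, and a `_` marks a down disk, which is what feeds `md.disks` and the `mdstat_disks` alert. A hedged sketch:

```python
# Hedged sketch: report in-use vs. down member disks per md array.
import re

array = None
for line in open("/proc/mdstat"):
    m = re.match(r"(md\d+) :", line)
    if m:
        array = m.group(1)                # array header line, e.g. "md0 : ..."
        continue
    m = re.search(r"\[(\d+)/(\d+)\] \[([U_]+)\]", line)
    if m and array:
        total, in_use, state = int(m.group(1)), int(m.group(2)), m.group(3)
        print(f"{array}: {in_use}/{total} disks in use, "
              f"{state.count('_')} down")
        array = None
```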
diff --git a/collectors/proc.plugin/integrations/memory_modules_dimms.md b/collectors/proc.plugin/integrations/memory_modules_dimms.md
new file mode 100644
index 000000000..dc59fe5fc
--- /dev/null
+++ b/collectors/proc.plugin/integrations/memory_modules_dimms.md
@@ -0,0 +1,145 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_modules_dimms.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Memory modules (DIMMs)"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memory modules (DIMMs)
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/devices/system/edac/mc
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+The Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,
+primarily ECC (Error-Correcting Code) memory errors.
+
+The collector provides data for:
+
+- Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds:
+ - errors related to a DIMM
+ - errors that cannot be associated with a DIMM
+
+- Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds:
+  - memory controllers that can identify the physical DIMMs and report errors directly for them,
+  - memory controllers that report errors for memory address ranges that can be linked to DIMMs.
+    In this case, the DIMMs reported may be more than the physical DIMMs installed.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per memory controller
+
+These metrics refer to the memory controller.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |
+| mc_name | Memory controller type. |
+| size_mb | The amount of memory in megabytes that this memory controller manages. |
+| max_location | Last available memory slot in this memory controller. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.edac_mc | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors/s |
+
+### Per memory module
+
+These metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |
+| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |
+| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |
+| dimm_edac_mode | Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |
+| dimm_label | Label assigned to this memory module. |
+| dimm_location | Location of the memory module. |
+| dimm_mem_type | Type of the memory module. |
+| size | The amount of memory in megabytes that this memory module manages. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.edac_mc_dimm | correctable, uncorrectable | errors/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) in the last 10 minutes |
+| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) in the last 10 minutes |
+| [ ecc_memory_dimm_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors in the last 10 minutes |
+| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors in the last 10 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
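For illustration (not part of this patch): every `mcX` directory exposes lifetime error totals as simple counter files; a collector converts successive readings into the `errors/s` rates charted above. A hedged sketch:

```python
# Hedged sketch: read per-controller EDAC error totals from sysfs.
from pathlib import Path

for mc in sorted(Path("/sys/devices/system/edac/mc").glob("mc*")):
    ce = int((mc / "ce_count").read_text())            # correctable
    ue = int((mc / "ue_count").read_text())            # uncorrectable
    ce_noinfo = int((mc / "ce_noinfo_count").read_text())
    print(f"{mc.name}: {ce} CE ({ce_noinfo} without DIMM info), {ue} UE")
```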
diff --git a/collectors/proc.plugin/integrations/memory_statistics.md b/collectors/proc.plugin/integrations/memory_statistics.md
new file mode 100644
index 000000000..712b4b5e8
--- /dev/null
+++ b/collectors/proc.plugin/integrations/memory_statistics.md
@@ -0,0 +1,137 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Memory Statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memory Statistics
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/vmstat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+The Linux virtual memory subsystem.
+
+Information about memory management, indicating how effectively the kernel allocates and frees
+memory resources in response to system demands.
+
+Monitors page faults, which occur when a process requests a portion of its memory that isn't
+immediately available. Monitoring these events can help diagnose inefficiencies in memory management and
+provide insights into application behavior.
+
+Tracks swapping activity — a vital aspect of memory management where the kernel moves data from RAM to
+swap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,
+a compressed cache for swap pages, and provides insights into its usage and performance implications.
+
+In the context of virtualized environments, it tracks the ballooning mechanism which is used to balance
+memory resources between host and guest systems.
+
+For systems using NUMA architecture, it provides insights into the local and remote memory accesses, which
+can impact the performance based on the memory access times.
+
+The collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out
+of memory resources.
+
+
+
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Memory Statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.swapio | in, out | KiB/s |
+| system.pgpgio | in, out | KiB/s |
+| system.pgfaults | minor, major | faults/s |
+| mem.balloon | inflate, deflate, migrate | KiB/s |
+| mem.zswapio | in, out | KiB/s |
+| mem.ksm_cow | swapin, write | KiB/s |
+| mem.thp_faults | alloc, fallback, fallback_charge | events/s |
+| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |
+| mem.thp_zero | alloc, failed | events/s |
+| mem.thp_collapse | alloc, failed | events/s |
+| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |
+| mem.thp_swapout | swapout, fallback | events/s |
+| mem.thp_compact | success, fail, stall | events/s |
+| mem.oom_kill | kills | kills/s |
+| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |
+| [ oom_kill ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/memory_usage.md b/collectors/proc.plugin/integrations/memory_usage.md
new file mode 100644
index 000000000..0eef72b12
--- /dev/null
+++ b/collectors/proc.plugin/integrations/memory_usage.md
@@ -0,0 +1,134 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_usage.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Memory Usage"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memory Usage
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/meminfo
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information
+about different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,
+SLAB memory, memory mappings, and more.
+
+Monitoring `/proc/meminfo` can be useful for (a parsing sketch follows the list):
+
+- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system
+ tuning and optimization. For example, if your system is frequently low on free memory, it might benefit
+ from more RAM.
+
+- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about
+  whether memory usage is a factor. For example, if your system is slow and swap usage is high, it could
+  mean that your system is swapping a lot of memory out to disk, which can degrade performance.
+
+- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed
+ decisions about future capacity needs.
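+
+As a minimal illustration (not part of Netdata), the fields above can be read with a few
+lines of Python; most `/proc/meminfo` fields are reported in kB, which the sketch
+converts to MiB, the unit used by the charts below.
+
+```python
+def read_meminfo():
+    """Parse /proc/meminfo into a dict of field name -> MiB."""
+    info = {}
+    with open("/proc/meminfo") as f:
+        for line in f:
+            key, rest = line.split(":", 1)
+            # Most fields are in kB; a few (e.g. HugePages_Total) are plain counts.
+            info[key] = int(rest.split()[0]) / 1024
+    return info
+
+mem = read_meminfo()
+print(f"MemAvailable: {mem.get('MemAvailable', 0):.1f} MiB")
+print(f"SwapFree:     {mem.get('SwapFree', 0):.1f} MiB")
+```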
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Memory Usage instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.ram | free, used, cached, buffers | MiB |
+| mem.available | avail | MiB |
+| mem.swap | free, used | MiB |
+| mem.swap_cached | cached | MiB |
+| mem.zswap | in-ram, on-disk | MiB |
+| mem.hwcorrupt | HardwareCorrupted | MiB |
+| mem.commited | Commited_AS | MiB |
+| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |
+| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |
+| mem.slab | reclaimable, unreclaimable | MiB |
+| mem.hugepages | free, used, surplus, reserved | MiB |
+| mem.thp | anonymous, shmem | MiB |
+| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |
+| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |
+| mem.high_low | high_used, low_used, high_free, low_free | MiB |
+| mem.cma | used, free | MiB |
+| mem.directmaps | 4k, 2m, 4m, 1g | MiB |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization |
+| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |
+| [ used_swap ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swap | swap memory utilization |
+| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/network_interfaces.md b/collectors/proc.plugin/integrations/network_interfaces.md
new file mode 100644
index 000000000..0d26b5b66
--- /dev/null
+++ b/collectors/proc.plugin/integrations/network_interfaces.md
@@ -0,0 +1,136 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/network_interfaces.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Network interfaces"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Network interfaces
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/dev
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor network interface metrics about bandwidth, state, errors, and more.
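+
+As a rough illustration (not part of Netdata), the counters behind `net.net` can be
+sampled from `/proc/net/dev` and converted to kilobits per second with a short Python
+sketch:
+
+```python
+import time
+
+def read_net_dev():
+    """Return {interface: (rx_bytes, tx_bytes)} from /proc/net/dev."""
+    stats = {}
+    with open("/proc/net/dev") as f:
+        for line in f.readlines()[2:]:  # the first two lines are headers
+            iface, data = line.split(":", 1)
+            fields = data.split()
+            stats[iface.strip()] = (int(fields[0]), int(fields[8]))
+    return stats
+
+t0 = read_net_dev()
+time.sleep(1)
+t1 = read_net_dev()
+for iface, (rx1, tx1) in t1.items():
+    rx0, tx0 = t0.get(iface, (rx1, tx1))
+    print(f"{iface}: rx {(rx1 - rx0) * 8 / 1000:.1f} kbit/s, "
+          f"tx {(tx1 - tx0) * 8 / 1000:.1f} kbit/s")
+```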
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Network interfaces instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.net | received, sent | kilobits/s |
+
+### Per network device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| interface_type | TBD |
+| device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| net.net | received, sent | kilobits/s |
+| net.speed | speed | kilobits/s |
+| net.duplex | full, half, unknown | state |
+| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
+| net.carrier | up, down | state |
+| net.mtu | mtu | octets |
+| net.packets | received, sent, multicast | packets/s |
+| net.errors | inbound, outbound | errors/s |
+| net.drops | inbound, outbound | drops/s |
+| net.fifo | receive, transmit | errors |
+| net.compressed | received, sent | packets/s |
+| net.events | frames, collisions, carrier | events/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |
+| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |
+| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |
+| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |
+| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |
+| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |
+| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |
+| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |
+| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
+| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/network_statistics.md b/collectors/proc.plugin/integrations/network_statistics.md
new file mode 100644
index 000000000..f43da8339
--- /dev/null
+++ b/collectors/proc.plugin/integrations/network_statistics.md
@@ -0,0 +1,160 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/network_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Network statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Network statistics
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/netstat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.
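+
+For orientation only (this is an illustrative sketch, not part of Netdata), the file
+behind the `netstat` module pairs a header line of field names with a line of values for
+each protocol group, so it can be flattened like this:
+
+```python
+def read_netstat():
+    """Parse /proc/net/netstat into {"Proto.Field": value}."""
+    with open("/proc/net/netstat") as f:
+        lines = f.readlines()
+    metrics = {}
+    for header, values in zip(lines[::2], lines[1::2]):
+        proto = header.split(":")[0]
+        for name, num in zip(header.split()[1:], values.split()[1:]):
+            metrics[f"{proto}.{name}"] = int(num)
+    return metrics
+
+stats = read_netstat()
+print("SYN cookies sent:", stats.get("TcpExt.SyncookiesSent"))
+print("Accept queue overflows:", stats.get("TcpExt.ListenOverflows"))
+```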
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Network statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.ip | received, sent | kilobits/s |
+| ip.tcpmemorypressures | pressures | events/s |
+| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |
+| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |
+| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |
+| ip.tcpsyncookies | received, sent, failed | packets/s |
+| ip.tcp_syn_queue | drops, cookies | packets/s |
+| ip.tcp_accept_queue | overflows, drops | packets/s |
+| ip.tcpsock | connections | active connections |
+| ip.tcppackets | received, sent | packets/s |
+| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |
+| ip.tcpopens | active, passive | connections/s |
+| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |
+| ipv4.packets | received, sent, forwarded, delivered | packets/s |
+| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |
+| ipv4.bcast | received, sent | kilobits/s |
+| ipv4.bcastpkts | received, sent | packets/s |
+| ipv4.mcast | received, sent | kilobits/s |
+| ipv4.mcastpkts | received, sent | packets/s |
+| ipv4.icmp | received, sent | packets/s |
+| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |
+| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |
+| ipv4.udppackets | received, sent | packets/s |
+| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |
+| ipv4.udplite | received, sent | packets/s |
+| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |
+| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |
+| ipv4.fragsin | ok, failed, all | packets/s |
+| ipv4.fragsout | ok, failed, created | packets/s |
+| system.ipv6 | received, sent | kilobits/s |
+| ipv6.packets | received, sent, forwarded, delivers | packets/s |
+| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |
+| ipv6.bcast | received, sent | kilobits/s |
+| ipv6.mcast | received, sent | kilobits/s |
+| ipv6.mcastpkts | received, sent | packets/s |
+| ipv6.udppackets | received, sent | packets/s |
+| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |
+| ipv6.udplitepackets | received, sent | packets/s |
+| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |
+| ipv6.icmp | received, sent | messages/s |
+| ipv6.icmpredir | received, sent | redirects/s |
+| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |
+| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |
+| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |
+| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
+| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
+| ipv6.icmpmldv2 | received, sent | reports/s |
+| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |
+| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |
+| ipv6.fragsin | ok, failed, timeout, all | packets/s |
+| ipv6.fragsout | ok, failed, all | packets/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |
+| [ 1m_tcp_syn_queue_cookies ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |
+| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |
+| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |
+| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |
+| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |
+| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |
+| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |
+| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |
+| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |
+| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/nfs_client.md b/collectors/proc.plugin/integrations/nfs_client.md
new file mode 100644
index 000000000..696e0c0d6
--- /dev/null
+++ b/collectors/proc.plugin/integrations/nfs_client.md
@@ -0,0 +1,98 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/nfs_client.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "NFS Client"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NFS Client
+
+
+<img src="https://netdata.cloud/img/nfs.png" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/rpc/nfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides statistics from the Linux kernel's NFS Client.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NFS Client instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nfs.net | udp, tcp | operations/s |
+| nfs.rpc | calls, retransmits, auth_refresh | calls/s |
+| nfs.proc2 | a dimension per proc2 call | calls/s |
+| nfs.proc3 | a dimension per proc3 call | calls/s |
+| nfs.proc4 | a dimension per proc4 call | calls/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/nfs_server.md b/collectors/proc.plugin/integrations/nfs_server.md
new file mode 100644
index 000000000..ddbf03f90
--- /dev/null
+++ b/collectors/proc.plugin/integrations/nfs_server.md
@@ -0,0 +1,103 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/nfs_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "NFS Server"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NFS Server
+
+
+<img src="https://netdata.cloud/img/nfs.png" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/rpc/nfsd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides statistics from the Linux kernel's NFS Server.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NFS Server instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nfsd.readcache | hits, misses, nocache | reads/s |
+| nfsd.filehandles | stale | handles/s |
+| nfsd.io | read, write | kilobytes/s |
+| nfsd.threads | threads | threads |
+| nfsd.net | udp, tcp | packets/s |
+| nfsd.rpc | calls, bad_format, bad_auth | calls/s |
+| nfsd.proc2 | a dimension per proc2 call | calls/s |
+| nfsd.proc3 | a dimension per proc3 call | calls/s |
+| nfsd.proc4 | a dimension per proc4 call | calls/s |
+| nfsd.proc4ops | a dimension per proc4 operation | operations/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/non-uniform_memory_access.md b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
new file mode 100644
index 000000000..58b96a3e7
--- /dev/null
+++ b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
@@ -0,0 +1,110 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/non-uniform_memory_access.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Non-Uniform Memory Access"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Non-Uniform Memory Access
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/devices/system/node
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Information about NUMA (Non-Uniform Memory Access) nodes on the system.
+
+NUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can
+share memory locally, improving performance and the ability of the system to be expanded. NUMA is used in a
+symmetric multiprocessing (SMP) system.
+
+In a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.
+Each node has its own memory and set of I/O devices, and one or more processors. While a processor can access
+memory in any of the nodes, it does so faster when accessing memory within its own node.
+
+The collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the
+efficiency of memory allocations in multi-node systems.
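+
+As a small illustration (not part of Netdata), the per-node counters behind
+`mem.numa_nodes` live in `/sys/devices/system/node/node*/numastat`. They are cumulative
+event counts since boot; a collector derives events/s from successive samples.
+
+```python
+import glob
+
+for path in sorted(glob.glob("/sys/devices/system/node/node*/numastat")):
+    node = path.split("/")[-2]  # e.g. "node0"
+    with open(path) as f:
+        counters = {k: int(v) for k, v in (line.split() for line in f)}
+    print(node, counters)  # numa_hit, numa_miss, local_node, other_node, ...
+```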
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per numa node
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| numa_node | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/page_types.md b/collectors/proc.plugin/integrations/page_types.md
new file mode 100644
index 000000000..7f84182de
--- /dev/null
+++ b/collectors/proc.plugin/integrations/page_types.md
@@ -0,0 +1,112 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/page_types.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Page types"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Page types
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/pagetypeinfo
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides metrics about the system's memory page types.
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Page types instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.pagetype_global | a dimension per pagesize | B |
+
+### Per node, zone, type
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| node_id | TBD |
+| node_zone | TBD |
+| node_type | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.pagetype | a dimension per pagesize | B |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/power_supply.md b/collectors/proc.plugin/integrations/power_supply.md
new file mode 100644
index 000000000..4980f845b
--- /dev/null
+++ b/collectors/proc.plugin/integrations/power_supply.md
@@ -0,0 +1,106 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/power_supply.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Power Supply"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Power Supply"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Power Supply
+
+
+<img src="https://netdata.cloud/img/powersupply.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/class/power_supply
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors power supply metrics, such as battery status, AC power status, and more.
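+
+As an illustration (not part of Netdata), the attributes behind these charts are plain
+text files under `/sys/class/power_supply`; for example, battery capacity can be read
+like this:
+
+```python
+import glob
+import os
+
+for supply in sorted(glob.glob("/sys/class/power_supply/*")):
+    name = os.path.basename(supply)
+    try:
+        with open(os.path.join(supply, "capacity")) as f:
+            print(f"{name}: {f.read().strip()}%")
+    except OSError:
+        pass  # AC adapters and some supplies expose no capacity attribute
+```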
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per power device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| powersupply.capacity | capacity | percentage |
+| powersupply.charge | empty_design, empty, now, full, full_design | Ah |
+| powersupply.energy | empty_design, empty, now, full, full_design | Wh |
+| powersupply.voltage | min_design, min, now, max, max_design | V |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/pressure_stall_information.md b/collectors/proc.plugin/integrations/pressure_stall_information.md
new file mode 100644
index 000000000..e590a8d38
--- /dev/null
+++ b/collectors/proc.plugin/integrations/pressure_stall_information.md
@@ -0,0 +1,128 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/pressure_stall_information.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Pressure Stall Information"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Pressure"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pressure Stall Information
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/pressure
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Introduced in Linux kernel 4.20, `/proc/pressure` provides pressure stall information
+(PSI). PSI is a feature that allows the system to track the amount of time the system
+is stalled due to resource contention, such as CPU, memory, or I/O.
+
+The collector monitors a separate file for each resource:
+
+- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.
+- **memory**: Tracks the amount of time tasks are stalled due to memory contention.
+- **io**: Tracks the amount of time tasks are stalled due to I/O contention.
+- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.
+
+Each file provides stall-time averages over the last 10 seconds, 1 minute, and 5 minutes,
+as well as the total stall time.
+
+Monitoring the `/proc/pressure` files can provide important insights into system performance and capacity planning (a parsing sketch follows the list):
+
+- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are
+ frequently being stalled due to lack of resources, which can significantly degrade system performance.
+
+- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can
+ help identify whether resource contention is the cause.
+
+- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource
+ utilization and make informed decisions about when to add more resources to your system.
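+
+As a minimal illustration (not part of Netdata), each `/proc/pressure` file contains
+lines such as `some avg10=0.12 avg60=0.08 avg300=0.05 total=123456`, where `total` is
+the cumulative stall time in microseconds. A short Python sketch can parse them:
+
+```python
+def read_pressure(resource):
+    """Parse /proc/pressure/<resource> into {"some"/"full": {field: value}}."""
+    result = {}
+    with open(f"/proc/pressure/{resource}") as f:
+        for line in f:
+            kind, *fields = line.split()
+            result[kind] = {k: float(v) for k, v in
+                            (field.split("=") for field in fields)}
+    return result
+
+cpu = read_pressure("cpu")
+print("CPU 'some' pressure over the last 10 seconds:", cpu["some"]["avg10"], "%")
+```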
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Pressure Stall Information instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.cpu_some_pressure | some10, some60, some300 | percentage |
+| system.cpu_some_pressure_stall_time | time | ms |
+| system.cpu_full_pressure | some10, some60, some300 | percentage |
+| system.cpu_full_pressure_stall_time | time | ms |
+| system.memory_some_pressure | some10, some60, some300 | percentage |
+| system.memory_some_pressure_stall_time | time | ms |
+| system.memory_full_pressure | some10, some60, some300 | percentage |
+| system.memory_full_pressure_stall_time | time | ms |
+| system.io_some_pressure | some10, some60, some300 | percentage |
+| system.io_some_pressure_stall_time | time | ms |
+| system.io_full_pressure | some10, some60, some300 | percentage |
+| system.io_full_pressure_stall_time | time | ms |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/sctp_statistics.md b/collectors/proc.plugin/integrations/sctp_statistics.md
new file mode 100644
index 000000000..ad9c26bf5
--- /dev/null
+++ b/collectors/proc.plugin/integrations/sctp_statistics.md
@@ -0,0 +1,98 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/sctp_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "SCTP Statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SCTP Statistics
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/sctp/snmp
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides statistics about the Stream Control Transmission Protocol (SCTP).
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per SCTP Statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| sctp.established | established | associations |
+| sctp.transitions | active, passive, aborted, shutdown | transitions/s |
+| sctp.packets | received, sent | packets/s |
+| sctp.packet_errors | invalid, checksum | packets/s |
+| sctp.fragmentation | reassembled, fragmented | packets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/socket_statistics.md b/collectors/proc.plugin/integrations/socket_statistics.md
new file mode 100644
index 000000000..2c59f9883
--- /dev/null
+++ b/collectors/proc.plugin/integrations/socket_statistics.md
@@ -0,0 +1,108 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/socket_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Socket statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Socket statistics
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/sockstat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides socket statistics.
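+
+As an illustration (not part of Netdata), `/proc/net/sockstat` holds one line per
+protocol with alternating field names and values, which makes it straightforward to
+parse:
+
+```python
+def read_sockstat():
+    """Parse /proc/net/sockstat into {protocol: {field: value}}."""
+    stats = {}
+    with open("/proc/net/sockstat") as f:
+        for line in f:
+            proto, rest = line.split(":", 1)
+            fields = rest.split()
+            stats[proto] = dict(zip(fields[::2], map(int, fields[1::2])))
+    return stats
+
+s = read_sockstat()
+print("TCP sockets in use:", s["TCP"]["inuse"], "- orphaned:", s["TCP"]["orphan"])
+```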
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Socket statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ip.sockstat_sockets | used | sockets |
+| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |
+| ipv4.sockstat_tcp_mem | mem | KiB |
+| ipv4.sockstat_udp_sockets | inuse | sockets |
+| ipv4.sockstat_udp_mem | mem | sockets |
+| ipv4.sockstat_udplite_sockets | inuse | sockets |
+| ipv4.sockstat_raw_sockets | inuse | sockets |
+| ipv4.sockstat_frag_sockets | inuse | fragments |
+| ipv4.sockstat_frag_mem | mem | KiB |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |
+| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/softirq_statistics.md b/collectors/proc.plugin/integrations/softirq_statistics.md
new file mode 100644
index 000000000..56cf9ab5c
--- /dev/null
+++ b/collectors/proc.plugin/integrations/softirq_statistics.md
@@ -0,0 +1,132 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/softirq_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "SoftIRQ statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/CPU"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SoftIRQ statistics
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/softirqs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+In the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.
+The top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.
+
+Softirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be
+deferred and processed later in a context where it's safe to enable interrupts.
+
+The actual work of handling the interrupt is offloaded to a softirq and executed later when the system
+decides it's a good time to process them. This helps to keep the system responsive by not blocking the top
+half for too long, which could lead to missed interrupts.
+
+Monitoring `/proc/softirqs` is useful for (a summing sketch follows the list):
+
+- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high
+ rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.
+
+- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about
+  what is going on. For example, a sudden increase in block device softirqs (`BLOCK`) might indicate a problem
+ with a disk.
+
+- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what
+ your system is doing, particularly in terms of how it's interacting with hardware and how it's handling
+ interrupts.
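+
+As an illustration (not part of Netdata), `/proc/softirqs` is a table with one column
+per CPU and one row per softirq type; summing each row gives the per-type totals since
+boot:
+
+```python
+with open("/proc/softirqs") as f:
+    cpus = f.readline().split()  # header row: CPU0 CPU1 ...
+    for line in f:
+        name, *counts = line.split()
+        total = sum(int(c) for c in counts)
+        print(f"{name.rstrip(':')}: {total} raised since boot across {len(cpus)} CPUs")
+```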
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per SoftIRQ statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.softirqs | a dimension per softirq | softirqs/s |
+
+### Per cpu core
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cpu | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cpu.softirqs | a dimension per softirq | softirqs/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/softnet_statistics.md b/collectors/proc.plugin/integrations/softnet_statistics.md
new file mode 100644
index 000000000..84ac5ac88
--- /dev/null
+++ b/collectors/proc.plugin/integrations/softnet_statistics.md
@@ -0,0 +1,134 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/softnet_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Softnet Statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Softnet Statistics
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/softnet_stat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.
+
+It provides information about:
+
+- Total number of processed packets (`processed`).
+- Packets dropped because the backlog queue was full (`dropped`).
+- Times net_rx_action was rescheduled.
+- Number of times all lists were processed before the quota was exhausted.
+- Number of times not all lists were processed because the quota was exhausted (`squeezed`).
+- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.
+- Number of times GRO cells were processed.
+
+Monitoring the `/proc/net/softnet_stat` file can be useful for (a decoding sketch follows the list):
+
+- **Network performance monitoring**: By tracking the total number of processed packets and how many packets
+ were dropped, you can gain insights into your system's network performance.
+
+- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.
+ For instance, a high number of dropped packets may indicate a network problem.
+
+- **Capacity planning**: If your system is consistently processing near its maximum capacity of network
+ packets, it might be time to consider upgrading your network infrastructure.
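+
+As an illustration (not part of Netdata), `/proc/net/softnet_stat` has one row per CPU
+and hexadecimal columns; the first three columns are the `processed`, `dropped`, and
+`squeezed` counters shown in the charts below:
+
+```python
+with open("/proc/net/softnet_stat") as f:
+    for cpu, line in enumerate(f):
+        fields = [int(x, 16) for x in line.split()]  # every column is hex
+        processed, dropped, squeezed = fields[0], fields[1], fields[2]
+        print(f"cpu{cpu}: processed={processed} dropped={dropped} squeezed={squeezed}")
+```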
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Softnet Statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |
+
+### Per cpu core
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |
+| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/synproxy.md b/collectors/proc.plugin/integrations/synproxy.md
new file mode 100644
index 000000000..04169773b
--- /dev/null
+++ b/collectors/proc.plugin/integrations/synproxy.md
@@ -0,0 +1,96 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/synproxy.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Synproxy"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Firewall"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Synproxy
+
+
+<img src="https://netdata.cloud/img/firewall.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/stat/synproxy
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides statistics about the Synproxy netfilter module.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Synproxy instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netfilter.synproxy_syn_received | received | packets/s |
+| netfilter.synproxy_conn_reopened | reopened | connections/s |
+| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+
+#### Options
+
+There are no configuration options.
+
+#### Examples
+
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/system_load_average.md b/collectors/proc.plugin/integrations/system_load_average.md
new file mode 100644
index 000000000..caff72737
--- /dev/null
+++ b/collectors/proc.plugin/integrations/system_load_average.md
@@ -0,0 +1,127 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_load_average.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "System Load Average"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/System"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# System Load Average
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/loadavg
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+The `/proc/loadavg` file provides information about the system load average.
+
+The load average is a measure of the amount of computational work that a system performs. It is a
+representation of the average system load over a period of time.
+
+This file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,
+respectively. It also includes the number of currently runnable processes and the total number of processes.
+
+Monitoring the load average can be used for:
+
+- **System performance**: If the load average is too high, it may indicate that your system is overloaded.
+ On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the
+ load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is
+ overloaded and tasks are waiting for CPU time.
+
+- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be
+ due to a runaway process, a software bug, or a hardware issue.
+
+- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your
+ system's workload. This can help with capacity planning and scaling decisions.
+
+Remember that the load average considers not only CPU usage, but also processes waiting for disk I/O.
+Therefore, high load averages could be due to I/O contention as well as CPU contention.
+
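+For reference, all fields of `/proc/loadavg` can be read with a few lines of C. This is a minimal parsing sketch, not the collector's actual implementation:
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    double load1, load5, load15;
+    int running, total, last_pid;
+
+    FILE *fp = fopen("/proc/loadavg", "r");
+    if (!fp) { perror("fopen"); return 1; }
+
+    /* the file holds a single line, e.g. "0.52 0.58 0.59 2/1024 31337" */
+    if (fscanf(fp, "%lf %lf %lf %d/%d %d",
+               &load1, &load5, &load15, &running, &total, &last_pid) != 6) {
+        fclose(fp);
+        return 1;
+    }
+    fclose(fp);
+
+    printf("load: %.2f %.2f %.2f, processes: %d runnable of %d\n",
+           load1, load5, load15, running, total);
+    return 0;
+}
+```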
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per System Load Average instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.load | load1, load5, load15 | load |
+| system.active_processes | active | processes |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | number of active CPU cores in the system |
+| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system fifteen-minute load average |
+| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system five-minute load average |
+| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system one-minute load average |
+| [ active_processes ](https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/system_statistics.md b/collectors/proc.plugin/integrations/system_statistics.md
new file mode 100644
index 000000000..2932dd8d2
--- /dev/null
+++ b/collectors/proc.plugin/integrations/system_statistics.md
@@ -0,0 +1,168 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "System statistics"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/System"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# System statistics
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/stat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+CPU utilization, states, frequencies, and other key Linux system performance metrics.
+
+The `/proc/stat` file provides various types of system statistics:
+
+- The overall system CPU usage statistics
+- Per CPU core statistics
+- The total context switching of the system
+- The total number of processes running
+- The total CPU interrupts
+- The total CPU softirqs
+
+The collector also reads:
+
+- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.
+- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.
+- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.
+- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.
+- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.
+- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.
+- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.
+
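+As an illustration of the primary data source, the aggregate `cpu` line of `/proc/stat` can be parsed as below. This is a simplified sketch: it reads the cumulative tick counters once, whereas a real collector derives utilization from the difference between two samples:
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    /* cumulative ticks (USER_HZ) since boot; guest and guest_nice
+     * (columns 9-10) are already accounted inside user and nice */
+    unsigned long long user, nice, system, idle, iowait, irq, softirq, steal;
+
+    FILE *fp = fopen("/proc/stat", "r");
+    if (!fp) { perror("fopen"); return 1; }
+
+    if (fscanf(fp, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
+               &user, &nice, &system, &idle, &iowait, &irq, &softirq, &steal) != 8) {
+        fclose(fp);
+        return 1;
+    }
+    fclose(fp);
+
+    unsigned long long busy = user + nice + system + irq + softirq + steal;
+    unsigned long long all  = busy + idle + iowait;
+    printf("cpu busy since boot: %llu of %llu ticks (%.1f%%)\n",
+           busy, all, 100.0 * (double)busy / (double)all);
+    return 0;
+}
+```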
+
+
+
+This collector is only supported on the following platforms:
+
+- linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector auto-detects all metrics. No configuration is needed.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The collector disables CPU frequency and idle state monitoring when there are more than 128 CPU cores available.
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per System statistics instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |
+| system.intr | interrupts | interrupts/s |
+| system.ctxt | switches | context switches/s |
+| system.forks | started | processes/s |
+| system.processes | running, blocked | processes |
+| cpu.core_throttling | a dimension per cpu core | events/s |
+| cpu.package_throttling | a dimension per package | events/s |
+| cpu.cpufreq | a dimension per cpu core | MHz |
+
+### Per cpu core
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cpu | The CPU core the metrics refer to. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |
+| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |
+| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |
+| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `netdata.conf`.
+Configuration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.
+
+The file format is a modified INI syntax. The general structure is:
+
+```ini
+[section1]
+ option1 = some value
+ option2 = some other value
+
+[section2]
+ option3 = some third value
+```
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/system_uptime.md b/collectors/proc.plugin/integrations/system_uptime.md
new file mode 100644
index 000000000..7eedd4313
--- /dev/null
+++ b/collectors/proc.plugin/integrations/system_uptime.md
@@ -0,0 +1,107 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_uptime.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "System Uptime"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/System"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# System Uptime
+
+
+<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/uptime
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+The amount of time the system has been up (running).
+
+Uptime is a critical aspect of overall system performance:
+
+- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.
+- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.
+- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.
+- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.
+- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.
+- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.
+- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.
+
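+The underlying file holds just two numbers: seconds since boot and cumulative idle time. A minimal, illustrative C sketch:
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    double uptime, idle; /* seconds since boot, cumulative idle seconds */
+
+    FILE *fp = fopen("/proc/uptime", "r");
+    if (!fp) { perror("fopen"); return 1; }
+    if (fscanf(fp, "%lf %lf", &uptime, &idle) != 2) { fclose(fp); return 1; }
+    fclose(fp);
+
+    printf("up %.0f seconds (~%.1f days)\n", uptime, uptime / 86400.0);
+    return 0;
+}
+```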
+
+
+
+This collector is only supported on the following platforms:
+
+- linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per System Uptime instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/wireless_network_interfaces.md b/collectors/proc.plugin/integrations/wireless_network_interfaces.md
new file mode 100644
index 000000000..57375b975
--- /dev/null
+++ b/collectors/proc.plugin/integrations/wireless_network_interfaces.md
@@ -0,0 +1,99 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/wireless_network_interfaces.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "Wireless network interfaces"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Network"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Wireless network interfaces
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/net/wireless
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor wireless devices with metrics about status, link quality, signal level, noise level and more.
+
+
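+For reference, `/proc/net/wireless` has two header lines followed by one line per wireless interface. The sketch below is illustrative only (the kernel appends a trailing dot to recently updated quality values, which the format string has to tolerate):
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    FILE *fp = fopen("/proc/net/wireless", "r");
+    if (!fp) { perror("fopen"); return 1; }
+
+    char line[512];
+    /* skip the two header lines */
+    if (!fgets(line, sizeof(line), fp) || !fgets(line, sizeof(line), fp)) {
+        fclose(fp);
+        return 1;
+    }
+
+    while (fgets(line, sizeof(line), fp)) {
+        char iface[32];
+        unsigned status;
+        double link, level, noise;
+
+        /* e.g. " wlan0: 0000   54.  -56.  -256 ..." */
+        if (sscanf(line, " %31[^:]: %x %lf%*[. ] %lf%*[. ] %lf",
+                   iface, &status, &link, &level, &noise) == 5)
+            printf("%s: quality=%.0f signal=%.0f dBm noise=%.0f dBm\n",
+                   iface, link, level, noise);
+    }
+    fclose(fp);
+    return 0;
+}
+```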
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per wireless device
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| wireless.status | status | status |
+| wireless.link_quality | link_quality | value |
+| wireless.signal_level | signal_level | dBm |
+| wireless.noise_level | noise_level | dBm |
+| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |
+| wireless.missed_beacons | missed_beacons | frames/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
new file mode 100644
index 000000000..d62d12ee6
--- /dev/null
+++ b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
@@ -0,0 +1,124 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "ZFS Adaptive Replacement Cache"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ZFS Adaptive Replacement Cache
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/spl/kstat/zfs/arcstats
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.
+
+
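+For reference, `/proc/spl/kstat/zfs/arcstats` is a simple name/type/value table preceded by a short header. The illustrative sketch below computes the lifetime hit ratio from the raw counters; the collector itself charts per-second rates derived from successive samples:
+
+```c
+#include <stdio.h>
+#include <string.h>
+
+int main(void) {
+    FILE *fp = fopen("/proc/spl/kstat/zfs/arcstats", "r");
+    if (!fp) { perror("fopen"); return 1; }
+
+    char line[256], name[128];
+    unsigned type;
+    unsigned long long value, hits = 0, misses = 0, size = 0;
+
+    while (fgets(line, sizeof(line), fp)) {
+        /* header lines either fail to parse or match no known name */
+        if (sscanf(line, "%127s %u %llu", name, &type, &value) != 3)
+            continue;
+        if (!strcmp(name, "hits"))        hits = value;
+        else if (!strcmp(name, "misses")) misses = value;
+        else if (!strcmp(name, "size"))   size = value;
+    }
+    fclose(fp);
+
+    if (hits + misses)
+        printf("ARC size=%llu MiB, lifetime hit ratio=%.2f%%\n",
+               size / (1024 * 1024), 100.0 * (double)hits / (double)(hits + misses));
+    return 0;
+}
+```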
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ZFS Adaptive Replacement Cache instance
+
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zfs.arc_size | arcsz, target, min, max | MiB |
+| zfs.l2_size | actual, size | MiB |
+| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |
+| zfs.bytes | read, write | KiB/s |
+| zfs.hits | hits, misses | percentage |
+| zfs.hits_rate | hits, misses | events/s |
+| zfs.dhits | hits, misses | percentage |
+| zfs.dhits_rate | hits, misses | events/s |
+| zfs.phits | hits, misses | percentage |
+| zfs.phits_rate | hits, misses | events/s |
+| zfs.mhits | hits, misses | percentage |
+| zfs.mhits_rate | hits, misses | events/s |
+| zfs.l2hits | hits, misses | percentage |
+| zfs.l2hits_rate | hits, misses | events/s |
+| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |
+| zfs.arc_size_breakdown | recent, frequent | percentage |
+| zfs.memory_ops | direct, throttled, indirect | operations/s |
+| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |
+| zfs.actual_hits | hits, misses | percentage |
+| zfs.actual_hits_rate | hits, misses | events/s |
+| zfs.demand_data_hits | hits, misses | percentage |
+| zfs.demand_data_hits_rate | hits, misses | events/s |
+| zfs.prefetch_data_hits | hits, misses | percentage |
+| zfs.prefetch_data_hits_rate | hits, misses | events/s |
+| zfs.hash_elements | current, max | elements |
+| zfs.hash_chains | current, max | chains |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/zfs_pools.md b/collectors/proc.plugin/integrations/zfs_pools.md
new file mode 100644
index 000000000..b913572e3
--- /dev/null
+++ b/collectors/proc.plugin/integrations/zfs_pools.md
@@ -0,0 +1,104 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zfs_pools.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "ZFS Pools"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ZFS Pools
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /proc/spl/kstat/zfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This integration provides metrics about the state of ZFS pools.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per zfs pool
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pool | The name of the ZFS pool. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |
+| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/integrations/zram.md b/collectors/proc.plugin/integrations/zram.md
new file mode 100644
index 000000000..0bcda3eaf
--- /dev/null
+++ b/collectors/proc.plugin/integrations/zram.md
@@ -0,0 +1,105 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zram.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
+sidebar_label: "ZRAM"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Linux Systems/Memory"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ZRAM
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: proc.plugin
+Module: /sys/block/zram
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+zRAM, or compressed RAM, is a block device that uses a portion of your system's RAM as its backing store.
+The data written to this block device is compressed and stored in memory.
+
+The collector provides information about the operation and effectiveness of zRAM on your system.
+
+
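+For reference, the savings and ratio charts can be derived from `/sys/block/zram<id>/mm_stat`. The sketch below is illustrative and assumes a single device named `zram0`; the column order follows the kernel's zram documentation:
+
+```c
+#include <stdio.h>
+
+int main(void) {
+    /* first three mm_stat columns, per the kernel zram docs:
+     * orig_data_size, compr_data_size, mem_used_total (bytes) */
+    unsigned long long orig, compr, used;
+
+    FILE *fp = fopen("/sys/block/zram0/mm_stat", "r"); /* zram0 assumed */
+    if (!fp) { perror("fopen"); return 1; }
+    if (fscanf(fp, "%llu %llu %llu", &orig, &compr, &used) != 3) {
+        fclose(fp);
+        return 1;
+    }
+    fclose(fp);
+
+    if (compr)
+        printf("original=%llu MiB compressed=%llu MiB ratio=%.2f\n",
+               orig >> 20, compr >> 20, (double)orig / (double)compr);
+    return 0;
+}
+```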
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per zram device
+
+
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | The zRAM device name (e.g. `zram0`). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.zram_usage | compressed, metadata | MiB |
+| mem.zram_savings | savings, original | MiB |
+| mem.zram_ratio | ratio | ratio |
+| mem.zram_efficiency | percent | percentage |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+There is no configuration file.
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/collectors/proc.plugin/metadata.yaml b/collectors/proc.plugin/metadata.yaml
index 81d83f50e..45351b36f 100644
--- a/collectors/proc.plugin/metadata.yaml
+++ b/collectors/proc.plugin/metadata.yaml
@@ -2643,22 +2643,22 @@ modules:
os: "linux"
- name: inbound_packets_dropped_ratio
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
+ metric: net.drops
info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
os: "linux"
- name: outbound_packets_dropped_ratio
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
+ metric: net.drops
info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
os: "linux"
- name: wifi_inbound_packets_dropped_ratio
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
+ metric: net.drops
info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
os: "linux"
- name: wifi_outbound_packets_dropped_ratio
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
+ metric: net.drops
info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
os: "linux"
- name: 1m_received_packets_rate
@@ -2669,20 +2669,8 @@ modules:
- name: 10s_received_packets_storm
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
metric: net.packets
- info:
- ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over
- the last minute
+ info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute
os: "linux freebsd"
- - name: inbound_packets_dropped
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: number of inbound dropped packets for the network interface ${label:device} in the last 10 minutes
- os: "linux"
- - name: outbound_packets_dropped
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: number of outbound dropped packets for the network interface ${label:device} in the last 10 minutes
- os: "linux"
- name: 10min_fifo_errors
link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
metric: net.fifo
@@ -3140,29 +3128,29 @@ modules:
os: "linux"
- name: tcp_connections
link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf
- metric: ipv4.tcpsock
- info: IPv4 TCP connections utilization
+ metric: ip.tcpsock
+ info: TCP connections utilization
os: "linux"
- - name: 1m_ipv4_tcp_resets_sent
+ - name: 1m_ip_tcp_resets_sent
link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
+ metric: ip.tcphandshake
info: average number of sent TCP RESETS over the last minute
os: "linux"
- - name: 10s_ipv4_tcp_resets_sent
+ - name: 10s_ip_tcp_resets_sent
link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
+ metric: ip.tcphandshake
info:
average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has
crashed. Netdata will not send a clear notification for this alarm.
os: "linux"
- - name: 1m_ipv4_tcp_resets_received
+ - name: 1m_ip_tcp_resets_received
link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
+ metric: ip.tcphandshake
info: average number of received TCP RESETS over the last minute
os: "linux freebsd"
- - name: 10s_ipv4_tcp_resets_received
+ - name: 10s_ip_tcp_resets_received
link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
+ metric: ip.tcphandshake
info:
average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed.
Netdata will not send a clear notification for this alarm.
@@ -3189,57 +3177,12 @@ modules:
labels: []
metrics:
- name: system.ip
- description: IP Bandwidth
+ description: IPv4 Bandwidth
unit: "kilobits/s"
chart_type: area
dimensions:
- name: received
- name: sent
- - name: ip.inerrors
- description: IP Input Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: noroutes
- - name: truncated
- - name: checksum
- - name: ip.mcast
- description: IP Multicast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ip.bcast
- description: IP Broadcast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ip.mcastpkts
- description: IP Multicast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ip.bcastpkts
- description: IP Broadcast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ip.ecnpkts
- description: IP ECN Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: CEP
- - name: NoECTP
- - name: ECTP0
- - name: ECTP1
- name: ip.tcpmemorypressures
description: TCP Memory Pressures
unit: "events/s"
@@ -3297,31 +3240,52 @@ modules:
dimensions:
- name: overflows
- name: drops
- - name: ipv4.packets
- description: IPv4 Packets
+ - name: ip.tcpsock
+        description: TCP Connections
+ unit: "active connections"
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: ip.tcppackets
+ description: IPv4 TCP Packets
unit: "packets/s"
chart_type: line
dimensions:
- name: received
- name: sent
- - name: forwarded
- - name: delivered
- - name: ipv4.fragsout
- description: IPv4 Fragments Sent
+ - name: ip.tcperrors
+ description: IPv4 TCP Errors
unit: "packets/s"
chart_type: line
dimensions:
- - name: ok
- - name: failed
- - name: created
- - name: ipv4.fragsin
- description: IPv4 Fragments Reassembly
+ - name: InErrs
+ - name: InCsumErrors
+ - name: RetransSegs
+ - name: ip.tcpopens
+ description: IPv4 TCP Opens
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: passive
+ - name: ip.tcphandshake
+ description: IPv4 TCP Handshake Issues
+ unit: "events/s"
+ chart_type: line
+ dimensions:
+ - name: EstabResets
+ - name: OutRsts
+ - name: AttemptFails
+ - name: SynRetrans
+ - name: ipv4.packets
+ description: IPv4 Packets
unit: "packets/s"
chart_type: line
dimensions:
- - name: ok
- - name: failed
- - name: all
+ - name: received
+ - name: sent
+ - name: forwarded
+ - name: delivered
- name: ipv4.errors
description: IPv4 Errors
unit: "packets/s"
@@ -3329,25 +3293,47 @@ modules:
dimensions:
- name: InDiscards
- name: OutDiscards
- - name: InHdrErrors
+ - name: InNoRoutes
- name: OutNoRoutes
+ - name: InHdrErrors
- name: InAddrErrors
- - name: InUnknownProtos
- - name: ipv4.icmp
- description: IPv4 ICMP Packets
+ - name: InTruncatedPkts
+ - name: InCsumErrors
+      - name: ipv4.bcast
+        description: IPv4 Broadcast Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ipv4.bcastpkts
+        description: IPv4 Broadcast Packets
unit: "packets/s"
chart_type: line
dimensions:
- name: received
- name: sent
- - name: ipv4.icmp_errors
- description: IPv4 ICMP Errors
+ - name: ipv4.mcast
+ description: IPv4 Multicast Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ipv4.mcastpkts
+        description: IPv4 Multicast Packets
unit: "packets/s"
chart_type: line
dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
+ - name: received
+ - name: sent
+ - name: ipv4.icmp
+ description: IPv4 ICMP Packets
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
- name: ipv4.icmpmsg
description: IPv4 ICMP Messages
unit: "packets/s"
@@ -3373,43 +3359,14 @@ modules:
- name: OutTimestamps
- name: InTimestampReps
- name: OutTimestampReps
- - name: ipv4.tcpsock
- description: IPv4 TCP Connections
- unit: "active connections"
- chart_type: line
- dimensions:
- - name: connections
- - name: ipv4.tcppackets
- description: IPv4 TCP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.tcperrors
- description: IPv4 TCP Errors
+ - name: ipv4.icmp_errors
+ description: IPv4 ICMP Errors
unit: "packets/s"
chart_type: line
dimensions:
- - name: InErrs
+ - name: InErrors
+ - name: OutErrors
- name: InCsumErrors
- - name: RetransSegs
- - name: ipv4.tcpopens
- description: IPv4 TCP Opens
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: active
- - name: passive
- - name: ipv4.tcphandshake
- description: IPv4 TCP Handshake Issues
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: EstabResets
- - name: OutRsts
- - name: AttemptFails
- - name: SynRetrans
- name: ipv4.udppackets
description: IPv4 UDP Packets
unit: "packets/s"
@@ -3446,6 +3403,31 @@ modules:
- name: NoPorts
- name: InCsumErrors
- name: IgnoredMulti
+ - name: ipv4.ecnpkts
+        description: IPv4 ECN Statistics
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: CEP
+ - name: NoECTP
+ - name: ECTP0
+ - name: ECTP1
+ - name: ipv4.fragsin
+ description: IPv4 Fragments Reassembly
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: all
+ - name: ipv4.fragsout
+ description: IPv4 Fragments Sent
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: created
- name: system.ipv6
description: IPv6 Bandwidth
unit: "kilobits/s"
@@ -3453,7 +3435,7 @@ modules:
dimensions:
- name: received
- name: sent
- - name: system.ipv6
+ - name: ipv6.packets
description: IPv6 Packets
unit: "packets/s"
chart_type: line
@@ -3462,23 +3444,6 @@ modules:
- name: sent
- name: forwarded
- name: delivers
- - name: ipv6.fragsout
- description: IPv6 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv6.fragsin
- description: IPv6 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: timeout
- - name: all
- name: ipv6.errors
description: IPv6 Errors
unit: "packets/s"
@@ -3493,6 +3458,27 @@ modules:
- name: InTruncatedPkts
- name: InNoRoutes
- name: OutNoRoutes
+ - name: ipv6.bcast
+ description: IPv6 Broadcast Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ipv6.mcast
+ description: IPv6 Multicast Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ipv6.mcastpkts
+ description: IPv6 Multicast Packets
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
- name: ipv6.udppackets
description: IPv6 UDP Packets
unit: "packets/s"
@@ -3528,27 +3514,6 @@ modules:
- name: InErrors
- name: NoPorts
- name: InCsumErrors
- - name: ipv6.mcast
- description: IPv6 Multicast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.bcast
- description: IPv6 Broadcast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.mcastpkts
- description: IPv6 Multicast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- name: ipv6.icmp
description: IPv6 ICMP Messages
unit: "messages/s"
@@ -3657,6 +3622,23 @@ modules:
- name: InECT1Pkts
- name: InECT0Pkts
- name: InCEPkts
+ - name: ipv6.fragsin
+ description: IPv6 Fragments Reassembly
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: timeout
+ - name: all
+ - name: ipv6.fragsout
+ description: IPv6 Fragments Sent
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: all
- meta:
plugin_name: proc.plugin
module_name: /proc/net/sockstat
@@ -3734,8 +3716,8 @@ modules:
description: ""
labels: []
metrics:
- - name: ipv4.sockstat_sockets
- description: IPv4 Sockets Used
+ - name: ip.sockstat_sockets
+ description: Sockets used for all address families
unit: "sockets"
chart_type: line
dimensions:
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
index a90f4838e..a0ddd76c4 100644
--- a/collectors/proc.plugin/plugin_proc.h
+++ b/collectors/proc.plugin/plugin_proc.h
@@ -58,7 +58,7 @@ void netdev_rename_device_add(
const char *host_device,
const char *container_device,
const char *container_name,
- DICTIONARY *labels,
+ RRDLABELS *labels,
const char *ctx_prefix);
void netdev_rename_device_del(const char *host_device);
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
index 09c6498e3..e65c42212 100644
--- a/collectors/proc.plugin/proc_diskstats.c
+++ b/collectors/proc.plugin/proc_diskstats.c
@@ -17,6 +17,11 @@
static struct disk {
char *disk; // the name of the disk (sda, sdb, etc, after being looked up)
char *device; // the device of the disk (before being looked up)
+ char *disk_by_id;
+ char *model;
+ char *serial;
+// bool rotational;
+// bool removable;
uint32_t hash;
unsigned long major;
unsigned long minor;
@@ -172,6 +177,8 @@ static char *path_to_sys_block_device = NULL;
static char *path_to_sys_block_device_bcache = NULL;
static char *path_to_sys_devices_virtual_block_device = NULL;
static char *path_to_device_mapper = NULL;
+static char *path_to_dev_disk = NULL;
+static char *path_to_sys_block = NULL;
static char *path_to_device_label = NULL;
static char *path_to_device_id = NULL;
static char *path_to_veritas_volume_groups = NULL;
@@ -469,6 +476,109 @@ static inline char *get_disk_name(unsigned long major, unsigned long minor, char
return strdup(result);
}
+static inline bool ends_with(const char *str, const char *suffix) {
+ if (!str || !suffix)
+ return false;
+
+ size_t len_str = strlen(str);
+ size_t len_suffix = strlen(suffix);
+ if (len_suffix > len_str)
+ return false;
+
+ return strncmp(str + len_str - len_suffix, suffix, len_suffix) == 0;
+}
+
+static inline char *get_disk_by_id(char *device) {
+ char pathname[256 + 1];
+ snprintfz(pathname, 256, "%s/by-id", path_to_dev_disk);
+
+ struct dirent *entry;
+ DIR *dp = opendir(pathname);
+ if (dp == NULL) {
+ internal_error(true, "Cannot open '%s'", pathname);
+ return NULL;
+ }
+
+ while ((entry = readdir(dp))) {
+ // We ignore the '.' and '..' entries
+ if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+ continue;
+
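+        // skip machine-generated aliases (uuid/wwn style links), keeping the
+        // human-readable bus/model/serial names that /dev/disk/by-id also provides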
+ if(strncmp(entry->d_name, "md-uuid-", 8) == 0 ||
+ strncmp(entry->d_name, "dm-uuid-", 8) == 0 ||
+ strncmp(entry->d_name, "nvme-eui.", 9) == 0 ||
+ strncmp(entry->d_name, "wwn-", 4) == 0 ||
+ strncmp(entry->d_name, "lvm-pv-uuid-", 12) == 0)
+ continue;
+
+ char link_target[256 + 1];
+ char full_path[256 + 1];
+ snprintfz(full_path, 256, "%s/%s", pathname, entry->d_name);
+
+ ssize_t len = readlink(full_path, link_target, 256);
+ if (len == -1)
+ continue;
+
+ link_target[len] = '\0';
+
+ if (ends_with(link_target, device)) {
+ char *s = strdupz(entry->d_name);
+ closedir(dp);
+ return s;
+ }
+ }
+
+ closedir(dp);
+ return NULL;
+}
+
+static inline char *get_disk_model(char *device) {
+ char path[256 + 1];
+ char buffer[256 + 1];
+
+ snprintfz(path, 256, "%s/%s/device/model", path_to_sys_block, device);
+ if(read_file(path, buffer, 256) != 0) {
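+        // some buses (e.g. mmc) expose the model under device/name instead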
+ snprintfz(path, 256, "%s/%s/device/name", path_to_sys_block, device);
+ if(read_file(path, buffer, 256) != 0)
+ return NULL;
+ }
+
+ return strdupz(buffer);
+}
+
+static inline char *get_disk_serial(char *device) {
+ char path[256 + 1];
+ char buffer[256 + 1];
+
+ snprintfz(path, 256, "%s/%s/device/serial", path_to_sys_block, device);
+ if(read_file(path, buffer, 256) != 0)
+ return NULL;
+
+ return strdupz(buffer);
+}
+
+//static inline bool get_disk_rotational(char *device) {
+// char path[256 + 1];
+// char buffer[256 + 1];
+//
+// snprintfz(path, 256, "%s/%s/queue/rotational", path_to_sys_block, device);
+// if(read_file(path, buffer, 256) != 0)
+// return false;
+//
+// return buffer[0] == '1';
+//}
+//
+//static inline bool get_disk_removable(char *device) {
+// char path[256 + 1];
+// char buffer[256 + 1];
+//
+// snprintfz(path, 256, "%s/%s/removable", path_to_sys_block, device);
+// if(read_file(path, buffer, 256) != 0)
+// return false;
+//
+// return buffer[0] == '1';
+//}
+
static void get_disk_config(struct disk *d) {
int def_enable = global_enable_new_disks_detected_at_runtime;
@@ -594,6 +704,11 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
d->disk = get_disk_name(major, minor, disk);
d->device = strdupz(disk);
+ d->disk_by_id = get_disk_by_id(disk);
+ d->model = get_disk_model(disk);
+ d->serial = get_disk_serial(disk);
+// d->rotational = get_disk_rotational(disk);
+// d->removable = get_disk_removable(disk);
d->hash = simple_hash(d->device);
d->major = major;
d->minor = minor;
@@ -854,6 +969,11 @@ static struct disk *get_disk(unsigned long major, unsigned long minor, char *dis
static void add_labels_to_disk(struct disk *d, RRDSET *st) {
rrdlabels_add(st->rrdlabels, "device", d->disk, RRDLABEL_SRC_AUTO);
rrdlabels_add(st->rrdlabels, "mount_point", d->mount_point, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(st->rrdlabels, "id", d->disk_by_id, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(st->rrdlabels, "model", d->model, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(st->rrdlabels, "serial", d->serial, RRDLABEL_SRC_AUTO);
+// rrdlabels_add(st->rrdlabels, "rotational", d->rotational ? "true" : "false", RRDLABEL_SRC_AUTO);
+// rrdlabels_add(st->rrdlabels, "removable", d->removable ? "true" : "false", RRDLABEL_SRC_AUTO);
switch (d->type) {
default:
@@ -922,6 +1042,12 @@ int do_proc_diskstats(int update_every, usec_t dt) {
snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix);
path_to_device_mapper = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer);
+ snprintfz(buffer, FILENAME_MAX, "%s/dev/disk", netdata_configured_host_prefix);
+ path_to_dev_disk = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk", buffer);
+
+ snprintfz(buffer, FILENAME_MAX, "%s/sys/block", netdata_configured_host_prefix);
+ path_to_sys_block = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /sys/block", buffer);
+
snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix);
path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer);
@@ -2026,6 +2152,9 @@ int do_proc_diskstats(int update_every, usec_t dt) {
freez(t->disk);
freez(t->device);
+ freez(t->disk_by_id);
+ freez(t->model);
+ freez(t->serial);
freez(t->mount_point);
freez(t->chart_id);
freez(t);
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
index 16881d170..eb2d0e0c0 100644
--- a/collectors/proc.plugin/proc_net_dev.c
+++ b/collectors/proc.plugin/proc_net_dev.c
@@ -123,7 +123,7 @@ static struct netdev {
const char *chart_family;
- DICTIONARY *chart_labels;
+ RRDLABELS *chart_labels;
int flipped;
unsigned long priority;
@@ -348,7 +348,7 @@ static struct netdev_rename {
const char *container_name;
const char *ctx_prefix;
- DICTIONARY *chart_labels;
+ RRDLABELS *chart_labels;
int processed;
@@ -373,7 +373,7 @@ void netdev_rename_device_add(
const char *host_device,
const char *container_device,
const char *container_name,
- DICTIONARY *labels,
+ RRDLABELS *labels,
const char *ctx_prefix)
{
netdata_mutex_lock(&netdev_rename_mutex);
diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c
index ce3068c0e..170daad5d 100644
--- a/collectors/proc.plugin/proc_net_netstat.c
+++ b/collectors/proc.plugin/proc_net_netstat.c
@@ -2,9 +2,9 @@
#include "plugin_proc.h"
-#define RRD_TYPE_NET_NETSTAT "ip"
-#define RRD_TYPE_NET_SNMP "ipv4"
-#define RRD_TYPE_NET_SNMP6 "ipv6"
+#define RRD_TYPE_NET_IP "ip"
+#define RRD_TYPE_NET_IP4 "ipv4"
+#define RRD_TYPE_NET_IP6 "ipv6"
#define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat"
#define CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME
@@ -424,7 +424,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "packets"
, NULL
, "packets"
@@ -464,7 +464,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "fragsout"
, NULL
, "fragments6"
@@ -506,7 +506,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "fragsin"
, NULL
, "fragments6"
@@ -557,7 +557,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "errors"
, NULL
, "errors"
@@ -605,7 +605,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "udppackets"
, NULL
, "udp6"
@@ -647,7 +647,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "udperrors"
, NULL
, "udp6"
@@ -689,7 +689,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "udplitepackets"
, NULL
, "udplite6"
@@ -730,7 +730,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "udpliteerrors"
, NULL
, "udplite6"
@@ -771,7 +771,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "mcast"
, NULL
, "multicast6"
@@ -806,7 +806,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "bcast"
, NULL
, "broadcast6"
@@ -841,7 +841,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "mcastpkts"
, NULL
, "multicast6"
@@ -876,7 +876,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmp"
, NULL
, "icmp6"
@@ -910,7 +910,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmpredir"
, NULL
, "icmp6"
@@ -962,7 +962,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmperrors"
, NULL
, "icmp6"
@@ -1018,7 +1018,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmpechos"
, NULL
, "icmp6"
@@ -1064,7 +1064,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "groupmemb"
, NULL
, "icmp6"
@@ -1109,7 +1109,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmprouter"
, NULL
, "icmp6"
@@ -1151,7 +1151,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmpneighbor"
, NULL
, "icmp6"
@@ -1189,7 +1189,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmpmldv2"
, NULL
, "icmp6"
@@ -1239,7 +1239,7 @@ static void do_proc_net_snmp6(int update_every) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6
+ RRD_TYPE_NET_IP6
, "icmptypes"
, NULL
, "icmp6"
@@ -1287,7 +1287,7 @@ static void do_proc_net_snmp6(int update_every) {
if (unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP6,
+ RRD_TYPE_NET_IP6,
"ect",
NULL,
"packets",
@@ -1852,11 +1852,11 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_system_ip)) {
st_system_ip = rrdset_create_localhost(
"system"
- , RRD_TYPE_NET_NETSTAT
+            , "ip" // FIXME: this is ipv4. Not changing it because that would require changes in cloud-frontend too
, NULL
, "network"
, NULL
- , "IP Bandwidth"
+ , "IPv4 Bandwidth"
, "kilobits/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
@@ -1874,43 +1874,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
rrdset_done(st_system_ip);
}
- if(do_inerrors == CONFIG_BOOLEAN_YES || (do_inerrors == CONFIG_BOOLEAN_AUTO &&
- (ipext_InNoRoutes ||
- ipext_InTruncatedPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_inerrors = CONFIG_BOOLEAN_YES;
- static RRDSET *st_ip_inerrors = NULL;
- static RRDDIM *rd_noroutes = NULL, *rd_truncated = NULL, *rd_checksum = NULL;
-
- if(unlikely(!st_ip_inerrors)) {
- st_ip_inerrors = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
- , "inerrors"
- , NULL
- , "errors"
- , NULL
- , "IP Input Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_ip_inerrors, RRDSET_FLAG_DETAIL);
-
- rd_noroutes = rrddim_add(st_ip_inerrors, "InNoRoutes", "noroutes", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_truncated = rrddim_add(st_ip_inerrors, "InTruncatedPkts", "truncated", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_checksum = rrddim_add(st_ip_inerrors, "InCsumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_inerrors, rd_noroutes, ipext_InNoRoutes);
- rrddim_set_by_pointer(st_ip_inerrors, rd_truncated, ipext_InTruncatedPkts);
- rrddim_set_by_pointer(st_ip_inerrors, rd_checksum, ipext_InCsumErrors);
- rrdset_done(st_ip_inerrors);
- }
-
if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO &&
(ipext_InMcastOctets ||
ipext_OutMcastOctets ||
@@ -1921,7 +1884,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ip_mcast)) {
st_ip_mcast = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP4
, "mcast"
, NULL
, "multicast"
@@ -1930,7 +1893,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "kilobits/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_MCAST
+ , NETDATA_CHART_PRIO_IPV4_MCAST
, update_every
, RRDSET_TYPE_AREA
);
@@ -1960,16 +1923,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ip_bcast)) {
st_ip_bcast = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP4
, "bcast"
, NULL
, "broadcast"
, NULL
- , "IP Broadcast Bandwidth"
+ , "IPv4 Broadcast Bandwidth"
, "kilobits/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_BCAST
+ , NETDATA_CHART_PRIO_IPV4_BCAST
, update_every
, RRDSET_TYPE_AREA
);
@@ -1999,16 +1962,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ip_mcastpkts)) {
st_ip_mcastpkts = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP4
, "mcastpkts"
, NULL
, "multicast"
, NULL
- , "IP Multicast Packets"
+ , "IPv4 Multicast Packets"
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_MCAST_PACKETS
+ , NETDATA_CHART_PRIO_IPV4_MCAST_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2035,16 +1998,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ip_bcastpkts)) {
st_ip_bcastpkts = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP4
, "bcastpkts"
, NULL
, "broadcast"
, NULL
- , "IP Broadcast Packets"
+ , "IPv4 Broadcast Packets"
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_BCAST_PACKETS
+ , NETDATA_CHART_PRIO_IPV4_BCAST_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2073,16 +2036,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ecnpkts)) {
st_ecnpkts = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP4
, "ecnpkts"
, NULL
, "ecn"
, NULL
- , "IP ECN Statistics"
+ , "IPv4 ECN Statistics"
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_ECN
+ , NETDATA_CHART_PRIO_IPV4_ECN
, update_every
, RRDSET_TYPE_LINE
);
@@ -2114,7 +2077,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_tcpmemorypressures)) {
st_tcpmemorypressures = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcpmemorypressures"
, NULL
, "tcp"
@@ -2123,7 +2086,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "events/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_MEM
+ , NETDATA_CHART_PRIO_IP_TCP_MEM_PRESSURE
, update_every
, RRDSET_TYPE_LINE
);
@@ -2150,7 +2113,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_tcpconnaborts)) {
st_tcpconnaborts = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcpconnaborts"
, NULL
, "tcp"
@@ -2194,7 +2157,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_tcpreorders)) {
st_tcpreorders = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcpreorders"
, NULL
, "tcp"
@@ -2236,7 +2199,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_ip_tcpofo)) {
st_ip_tcpofo = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcpofo"
, NULL
, "tcp"
@@ -2276,7 +2239,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_syncookies)) {
st_syncookies = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcpsyncookies"
, NULL
, "tcp"
@@ -2315,7 +2278,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_syn_queue)) {
st_syn_queue = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcp_syn_queue"
, NULL
, "tcp"
@@ -2351,7 +2314,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_accept_queue)) {
st_accept_queue = rrdset_create_localhost(
- RRD_TYPE_NET_NETSTAT
+ RRD_TYPE_NET_IP
, "tcp_accept_queue"
, NULL
, "tcp"
@@ -2392,7 +2355,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "packets"
, NULL
, "packets"
@@ -2433,7 +2396,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "fragsout"
, NULL
, "fragments"
@@ -2442,7 +2405,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_OUT
, update_every
, RRDSET_TYPE_LINE
);
@@ -2473,7 +2436,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "fragsin"
, NULL
, "fragments"
@@ -2482,7 +2445,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS + 1
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_IN
, update_every
, RRDSET_TYPE_LINE
);
@@ -2513,13 +2476,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
static RRDDIM *rd_InDiscards = NULL,
*rd_OutDiscards = NULL,
*rd_InHdrErrors = NULL,
+ *rd_InNoRoutes = NULL,
*rd_OutNoRoutes = NULL,
*rd_InAddrErrors = NULL,
+ *rd_InTruncatedPkts = NULL,
+ *rd_InCsumErrors = NULL,
*rd_InUnknownProtos = NULL;
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "errors"
, NULL
, "errors"
@@ -2537,11 +2503,14 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards);
@@ -2549,7 +2518,10 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors);
rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors);
rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos);
+ rrddim_set_by_pointer(st, rd_InNoRoutes, (collected_number)ipext_InNoRoutes);
rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes);
+ rrddim_set_by_pointer(st, rd_InTruncatedPkts, (collected_number)ipext_InTruncatedPkts);
+ rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)ipext_InCsumErrors);
rrdset_done(st);
}
@@ -2571,7 +2543,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_packets)) {
st_packets = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "icmp"
, NULL
, "icmp"
@@ -2580,7 +2552,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP
+ , NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2602,7 +2574,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st_errors)) {
st_errors = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "icmp_errors"
, NULL
, "icmp"
@@ -2611,7 +2583,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP + 1
+ , NETDATA_CHART_PRIO_IPV4_ICMP_ERRORS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2678,7 +2650,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "icmpmsg"
, NULL
, "icmp"
@@ -2687,7 +2659,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP + 2
+ , NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES
, update_every
, RRDSET_TYPE_LINE
);
@@ -2754,16 +2726,16 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP
, "tcpsock"
, NULL
, "tcp"
, NULL
- , "IPv4 TCP Connections"
+ , "TCP Connections"
, "active connections"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP
+ , NETDATA_CHART_PRIO_IP_TCP_ESTABLISHED_CONNS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2787,7 +2759,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP
, "tcppackets"
, NULL
, "tcp"
@@ -2796,7 +2768,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP + 4
+ , NETDATA_CHART_PRIO_IP_TCP_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2826,7 +2798,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP
, "tcperrors"
, NULL
, "tcp"
@@ -2835,7 +2807,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP + 20
+ , NETDATA_CHART_PRIO_IP_TCP_ERRORS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2864,7 +2836,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP
, "tcpopens"
, NULL
, "tcp"
@@ -2873,7 +2845,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "connections/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP + 5
+ , NETDATA_CHART_PRIO_IP_TCP_OPENS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2903,7 +2875,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP
, "tcphandshake"
, NULL
, "tcp"
@@ -2912,7 +2884,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "events/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP + 30
+ , NETDATA_CHART_PRIO_IP_TCP_HANDSHAKE
, update_every
, RRDSET_TYPE_LINE
);
@@ -2946,7 +2918,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "udppackets"
, NULL
, "udp"
@@ -2955,7 +2927,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP
+ , NETDATA_CHART_PRIO_IPV4_UDP_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -2991,7 +2963,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "udperrors"
, NULL
, "udp"
@@ -3000,7 +2972,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "events/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP + 10
+ , NETDATA_CHART_PRIO_IPV4_UDP_ERRORS
, update_every
, RRDSET_TYPE_LINE
);
@@ -3044,7 +3016,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "udplite"
, NULL
, "udplite"
@@ -3053,7 +3025,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE_PACKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -3078,7 +3050,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- RRD_TYPE_NET_SNMP
+ RRD_TYPE_NET_IP4
, "udplite_errors"
, NULL
, "udplite"
@@ -3087,7 +3059,7 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, "packets/s"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE + 10
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE_ERRORS
, update_every
, RRDSET_TYPE_LINE);
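
Every netstat hunk above makes the same two substitutions: the chart type moves from the SNMP-era prefixes (RRD_TYPE_NET_SNMP, RRD_TYPE_NET_NETSTAT) to protocol-scoped ones (RRD_TYPE_NET_IP, RRD_TYPE_NET_IP4), and priorities computed as base-plus-offset (e.g. NETDATA_CHART_PRIO_IPV4_TCP + 20) become one named constant per chart; the IPv4 errors chart additionally gains the IpExt-sourced dimensions InNoRoutes, InTruncatedPkts and InCsumErrors. A minimal compilable illustration of the priority side — the numeric values below are invented, the real ones live in netdata's chart-priority header:

    #include <stdio.h>

    #define NETDATA_CHART_PRIO_IPV4_TCP        5200  /* old shared base; value invented */
    #define NETDATA_CHART_PRIO_IP_TCP_ERRORS   5225  /* new dedicated constant; value invented */

    int main(void) {
        /* old style: "tcperrors" sat at an arithmetic offset from the TCP base */
        printf("old tcperrors priority: %d\n", NETDATA_CHART_PRIO_IPV4_TCP + 20);
        /* new style: one self-describing constant per chart */
        printf("new tcperrors priority: %d\n", NETDATA_CHART_PRIO_IP_TCP_ERRORS);
        return 0;
    }
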
diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
index e94b891ca..b0feab5fa 100644
--- a/collectors/proc.plugin/proc_net_sockstat.c
+++ b/collectors/proc.plugin/proc_net_sockstat.c
@@ -228,16 +228,16 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
if(unlikely(!st)) {
st = rrdset_create_localhost(
- "ipv4"
+ "ip"
, "sockstat_sockets"
, NULL
, "sockets"
, NULL
- , "IPv4 Sockets Used"
+ , "Sockets used for all address families"
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_SOCKETS
+ , NETDATA_CHART_PRIO_IP_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -272,7 +272,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, NULL
, "tcp"
, NULL
- , "IPv4 TCP Sockets"
+ , "TCP Sockets"
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
@@ -310,11 +310,11 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, NULL
, "tcp"
, NULL
- , "IPv4 TCP Sockets Memory"
+ , "TCP Sockets Memory"
, "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP_MEM
+ , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS_MEM
, update_every
, RRDSET_TYPE_AREA
);
@@ -347,7 +347,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP
+ , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -380,7 +380,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP_MEM
+ , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS_MEM
, update_every
, RRDSET_TYPE_AREA
);
@@ -413,7 +413,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE
+ , NETDATA_CHART_PRIO_IPV4_UDPLITE_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -479,7 +479,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "fragments"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -512,7 +512,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
, "KiB"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_MEM
+ , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS_MEM
, update_every
, RRDSET_TYPE_AREA
);
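
The sockstat hunks retitle charts that are not IPv4-specific ("ipv4" becomes "ip", "IPv4 Sockets Used" becomes "Sockets used for all address families") while keeping the create-once / update-every-iteration idiom intact. A standalone sketch of that idiom — all names below are stand-ins, not the netdata API; the real calls are rrdset_create_localhost(), rrddim_set_by_pointer() and rrdset_done():

    #include <stdio.h>

    typedef struct { const char *type, *id; } chart_t;   /* stand-in for RRDSET */

    static chart_t *chart_create(const char *type, const char *id) {
        static chart_t c; c.type = type; c.id = id; return &c;
    }
    static void chart_set(chart_t *st, const char *dim, long v) {
        printf("%s.%s %s=%ld\n", st->type, st->id, dim, v);
    }

    void collect_sockets(long used) {
        static chart_t *st = NULL;                        /* created once, reused forever */
        if (!st)
            st = chart_create("ip", "sockstat_sockets");  /* "ip" family after this diff */
        chart_set(st, "used", used);                      /* per-iteration update */
    }

    int main(void) { collect_sockets(42); collect_sockets(43); return 0; }
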
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c
index 065cf6055..16e0248af 100644
--- a/collectors/proc.plugin/proc_net_sockstat6.c
+++ b/collectors/proc.plugin/proc_net_sockstat6.c
@@ -130,7 +130,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_TCP
+ , NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -163,7 +163,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_UDP
+ , NETDATA_CHART_PRIO_IPV6_UDP_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -196,7 +196,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_UDPLITE
+ , NETDATA_CHART_PRIO_IPV6_UDPLITE_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -229,7 +229,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) {
, "sockets"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_RAW
+ , NETDATA_CHART_PRIO_IPV6_RAW_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
@@ -262,7 +262,7 @@ int do_proc_net_sockstat6(int update_every, usec_t dt) {
, "fragments"
, PLUGIN_PROC_NAME
, PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_FRAGMENTS
+ , NETDATA_CHART_PRIO_IPV6_FRAGMENTS_SOCKETS
, update_every
, RRDSET_TYPE_LINE
);
diff --git a/collectors/proc.plugin/sys_devices_pci_aer.c b/collectors/proc.plugin/sys_devices_pci_aer.c
index 134426238..296195182 100644
--- a/collectors/proc.plugin/sys_devices_pci_aer.c
+++ b/collectors/proc.plugin/sys_devices_pci_aer.c
@@ -268,6 +268,11 @@ int do_proc_sys_devices_pci_aer(int update_every, usec_t dt __maybe_unused) {
title = "PCI Root-Port Advanced Error Reporting (AER) Fatal Errors";
context = "pci.rootport_aer_fatal";
break;
+
+ default:
+ title = "Unknown PCI Advanced Error Reporting";
+ context = "pci.unknown_aer";
+ break;
}
char id[RRD_ID_LENGTH_MAX + 1];
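
The AER hunk adds a default arm to the switch over discovered sysfs counter types, so an entry that matches none of the known cases still gets a generic title and context rather than relying on whatever a missing case would leave behind. A compilable sketch of the shape — the enum and its values are stand-ins; only the default-arm strings come from the hunk:

    #include <stdio.h>

    typedef enum { AER_ROOTPORT_FATAL, AER_FUTURE_TYPE } aer_type_t;  /* names assumed */

    static void pick_chart(aer_type_t t) {
        const char *title, *context;
        switch (t) {
        case AER_ROOTPORT_FATAL:
            title   = "PCI Root-Port Advanced Error Reporting (AER) Fatal Errors";
            context = "pci.rootport_aer_fatal";
            break;
        default:                    /* added by this diff: catch-all for unknown types */
            title   = "Unknown PCI Advanced Error Reporting";
            context = "pci.unknown_aer";
            break;
        }
        printf("%s -> %s\n", context, title);
    }

    int main(void) { pick_chart(AER_FUTURE_TYPE); return 0; }
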
diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c
index 0947f61f0..fdaa22cb7 100644
--- a/collectors/proc.plugin/sys_devices_system_edac_mc.c
+++ b/collectors/proc.plugin/sys_devices_system_edac_mc.c
@@ -265,22 +265,22 @@ int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt __maybe_unuse
char buffer[1024 + 1];
- if(read_edac_mc_rank_file(m->name, d->name, "dimm_dev_type", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "dimm_dev_type", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "dimm_dev_type", buffer, RRDLABEL_SRC_AUTO);
- if(read_edac_mc_rank_file(m->name, d->name, "dimm_edac_mode", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "dimm_edac_mode", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "dimm_edac_mode", buffer, RRDLABEL_SRC_AUTO);
- if(read_edac_mc_rank_file(m->name, d->name, "dimm_label", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "dimm_label", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "dimm_label", buffer, RRDLABEL_SRC_AUTO);
- if(read_edac_mc_rank_file(m->name, d->name, "dimm_location", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "dimm_location", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "dimm_location", buffer, RRDLABEL_SRC_AUTO);
- if(read_edac_mc_rank_file(m->name, d->name, "dimm_mem_type", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "dimm_mem_type", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "dimm_mem_type", buffer, RRDLABEL_SRC_AUTO);
- if(read_edac_mc_rank_file(m->name, d->name, "size", buffer, 1024))
+ if (read_edac_mc_rank_file(m->name, d->name, "size", buffer, 1024))
rrdlabels_add(d->st->rrdlabels, "size", buffer, RRDLABEL_SRC_AUTO);
d->ce.rd = rrddim_add(d->st, "correctable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
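
The edac_mc hunk is a whitespace-only style change (if( to if (), but the block it touches repeats one pattern per DIMM attribute: read a sysfs rank file, and if it yields a value, attach it as an auto-sourced chart label. A condensed, runnable sketch of that loop — the six attribute names come from the hunk; read_rank_attr() is a stub standing in for read_edac_mc_rank_file(), and labels are printed instead of being attached via rrdlabels_add():

    #include <stdio.h>

    static int read_rank_attr(const char *attr, char *buf, size_t len) {
        snprintf(buf, len, "<value of %s>", attr);   /* stub: pretend sysfs read */
        return 1;
    }

    int main(void) {
        static const char *attrs[] = {
            "dimm_dev_type", "dimm_edac_mode", "dimm_label",
            "dimm_location", "dimm_mem_type", "size",
        };
        char buffer[1024 + 1];
        for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
            if (read_rank_attr(attrs[i], buffer, sizeof(buffer)))
                printf("label %s=%s\n", attrs[i], buffer);
        return 0;
    }
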